commit 76880301e4da71040e5ed11aef16ef64796057fc Author: Enis Soztutar Date: Wed Apr 23 20:28:52 2014 -0700 v1 diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java index b92aabd..3c5968b 100644 --- hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java +++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java @@ -18,7 +18,14 @@ */ package org.apache.hadoop.hbase.client; -import com.google.protobuf.ServiceException; +import java.io.Closeable; +import java.io.IOException; +import java.util.List; +import java.util.Map; +import java.util.regex.Pattern; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.ClusterStatus; @@ -45,18 +52,17 @@ import org.apache.hadoop.hbase.snapshot.SnapshotCreationException; import org.apache.hadoop.hbase.snapshot.UnknownSnapshotException; import org.apache.hadoop.hbase.util.Pair; -import java.io.Closeable; -import java.io.IOException; -import java.util.List; -import java.util.Map; -import java.util.regex.Pattern; +import com.google.protobuf.ServiceException; /** - * The administrative API for HBase. Obtain an instance from an {@link HConnection}. - * - * @since 0.99.0 + * The administrative API for HBase. Obtain an instance from a {@link Connection}. */ +// TODO: should Admin interface contain metadata (DDL) related stuff together with +// real admin stuff (getClusterStatus, shutdown, etc. Should we break the interfaces? +@InterfaceAudience.Public +@InterfaceStability.Evolving public interface Admin extends Abortable, Closeable { + int getOperationTimeout(); @Override @@ -66,9 +72,9 @@ public interface Admin extends Abortable, Closeable { boolean isAborted(); /** - * @return HConnection used by this object. 
+ * @return Connection used by this object. */ - HConnection getConnection(); + Connection getConnection(); /** * @return - true if the master server is running. Throws an exception otherwise. @@ -85,9 +91,7 @@ public interface Admin extends Abortable, Closeable { boolean tableExists(final TableName tableName) throws IOException; /** - * List all the userspace tables. In other words, scan the hbase:meta table. If we wanted this to - * be really fast, we could implement a special catalog table that just contains table names and - * their descriptors. Right now, it only exists as part of the hbase:meta table's region info. + * List all the userspace tables by scaning the hbase:meta table. * * @return - returns an array of HTableDescriptors * @throws IOException if a remote or network exception occurs @@ -471,101 +475,67 @@ public interface Admin extends Abortable, Closeable { List getOnlineRegions(final ServerName sn) throws IOException; /** - * Flush a table or an individual region. Synchronous operation. - * - * @param tableNameOrRegionName table or region to flush - * @throws IOException if a remote or network exception occurs - * @throws InterruptedException - */ - void flush(final String tableNameOrRegionName) throws IOException, InterruptedException; - - /** - * Flush a table or an individual region. Synchronous operation. - * - * @param tableNameOrRegionName table or region to flush - * @throws IOException if a remote or network exception occurs - * @throws InterruptedException - */ - void flush(final byte[] tableNameOrRegionName) throws IOException, InterruptedException; - - /** - * Compact a table or an individual region. Asynchronous operation. + * Flush an individual region. Synchronous operation. 
* - * @param tableNameOrRegionName table or region to compact + * @param regionName region to flush * @throws IOException if a remote or network exception occurs * @throws InterruptedException */ - void compact(final String tableNameOrRegionName) throws IOException, InterruptedException; + void flushRegion(final byte[] regionName) throws IOException, InterruptedException; /** - * Compact a table or an individual region. Asynchronous operation. + * Flush all regions of a table. Synchronous operation. * - * @param tableNameOrRegionName table or region to compact + * @param tableName table to flush * @throws IOException if a remote or network exception occurs * @throws InterruptedException */ - void compact(final byte[] tableNameOrRegionName) throws IOException, InterruptedException; + void flushTable(final TableName tableName) throws IOException, InterruptedException; /** - * Compact a column family within a table or region. Asynchronous operation. + * Compact an individual region. Asynchronous operation. * - * @param tableOrRegionName table or region to compact - * @param columnFamily column family within a table or region + * @param regionName region to compact + * @param isMajor whether to do a major or minor compaction * @throws IOException if a remote or network exception occurs * @throws InterruptedException */ - void compact(String tableOrRegionName, String columnFamily) + void compactRegion(final byte[] regionName, boolean isMajor) throws IOException, InterruptedException; /** - * Compact a column family within a table or region. Asynchronous operation. + * Compact a column family within a region. Asynchronous operation. 
* - * @param tableNameOrRegionName table or region to compact - * @param columnFamily column family within a table or region + * @param regionName region to compact + * @param columnFamily column family to compact + * @param isMajor whether to do a major or minor compaction * @throws IOException if a remote or network exception occurs * @throws InterruptedException */ - void compact(final byte[] tableNameOrRegionName, final byte[] columnFamily) + void compactRegion(final byte[] regionName, final byte[] columnFamily, boolean isMajor) throws IOException, InterruptedException; /** - * Major compact a table or an individual region. Asynchronous operation. + * Compact all regions of a table. Asynchronous operation. * - * @param tableNameOrRegionName table or region to major compact + * @param tableName table to compact + * @param isMajor whether to do a major or minor compaction * @throws IOException if a remote or network exception occurs * @throws InterruptedException */ - void majorCompact(final String tableNameOrRegionName) throws IOException, InterruptedException; - - /** - * Major compact a table or an individual region. Asynchronous operation. - * - * @param tableNameOrRegionName table or region to major compact - * @throws IOException if a remote or network exception occurs - * @throws InterruptedException - */ - void majorCompact(final byte[] tableNameOrRegionName) throws IOException, InterruptedException; - - /** - * Major compact a column family within a table or region. Asynchronous operation. 
- * - * @param tableNameOrRegionName table or region to major compact - * @param columnFamily column family within a table or region - * @throws IOException if a remote or network exception occurs - * @throws InterruptedException - */ - void majorCompact(final String tableNameOrRegionName, final String columnFamily) + void compactTable(final TableName tableName, boolean isMajor) throws IOException, InterruptedException; /** - * Major compact a column family within a table or region. Asynchronous operation. + * Compact a column family within a table. Asynchronous operation. * - * @param tableNameOrRegionName table or region to major compact + * @param tableName table to compact * @param columnFamily column family within a table or region + * @param isMajor whether to do a major or minor compaction * @throws IOException if a remote or network exception occurs * @throws InterruptedException */ - void majorCompact(final byte[] tableNameOrRegionName, final byte[] columnFamily) + void compactTable(final TableName tableName, final byte[] columnFamily, boolean isMajor) throws IOException, InterruptedException; /** diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java index fc5c347..6f93f8d 100644 --- hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java +++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java @@ -34,9 +34,10 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MasterService; /** Internal methods on HConnection that should not be used by user code. */ @InterfaceAudience.Private // NOTE: DO NOT make this class public. It was made package-private on purpose. 
-interface ClusterConnection extends HConnection { +interface ClusterConnection extends HConnection, Connection { /** @return - true if the master server is running */ + @Override boolean isMasterRunning() throws MasterNotRunningException, ZooKeeperConnectionException; @@ -52,9 +53,10 @@ interface ClusterConnection extends HConnection { * @throws IOException * if a remote or network exception occurs */ + @Override boolean isTableAvailable(TableName tableName, byte[][] splitKeys) throws IOException; - + /** * Find the location of the region of tableName that row * lives in. @@ -64,12 +66,14 @@ interface ClusterConnection extends HConnection { * question * @throws IOException if a remote or network exception occurs */ + @Override public HRegionLocation locateRegion(final TableName tableName, final byte [] row) throws IOException; /** * Allows flushing the region cache. */ + @Override void clearRegionCache(); /** @@ -78,12 +82,14 @@ interface ClusterConnection extends HConnection { * @param tableName Name of the table whose regions we are to remove from * cache. */ + @Override void clearRegionCache(final TableName tableName); /** * Deletes cached locations for the specific region. * @param location The location object for the region, to be purged from cache. */ + @Override void deleteCachedRegionLocation(final HRegionLocation location); /** @@ -95,6 +101,7 @@ interface ClusterConnection extends HConnection { * question * @throws IOException if a remote or network exception occurs */ + @Override HRegionLocation relocateRegion(final TableName tableName, final byte [] row) throws IOException; @@ -106,6 +113,7 @@ interface ClusterConnection extends HConnection { * @param exception the exception if any. Can be null. 
* @param source the previous location */ + @Override void updateCachedLocations(TableName tableName, byte[] rowkey, Object exception, ServerName source); @@ -117,6 +125,7 @@ interface ClusterConnection extends HConnection { * question * @throws IOException if a remote or network exception occurs */ + @Override HRegionLocation locateRegion(final byte[] regionName) throws IOException; @@ -126,6 +135,7 @@ interface ClusterConnection extends HConnection { * @return list of region locations for all regions of table * @throws IOException */ + @Override List locateRegions(final TableName tableName) throws IOException; /** @@ -137,6 +147,7 @@ interface ClusterConnection extends HConnection { * @return list of region locations for all regions of table * @throws IOException */ + @Override List locateRegions(final TableName tableName, final boolean useCache, final boolean offlined) throws IOException; @@ -145,6 +156,7 @@ interface ClusterConnection extends HConnection { /** * Returns a {@link MasterKeepAliveConnection} to the active master */ + @Override MasterService.BlockingInterface getMaster() throws IOException; @@ -154,6 +166,7 @@ interface ClusterConnection extends HConnection { * @return proxy for HRegionServer * @throws IOException if a remote or network exception occurs */ + @Override AdminService.BlockingInterface getAdmin(final ServerName serverName) throws IOException; /** @@ -165,6 +178,7 @@ interface ClusterConnection extends HConnection { * @throws IOException if a remote or network exception occurs * */ + @Override ClientService.BlockingInterface getClient(final ServerName serverName) throws IOException; /** @@ -175,6 +189,7 @@ interface ClusterConnection extends HConnection { * @return Location of row. 
* @throws IOException if a remote or network exception occurs */ + @Override HRegionLocation getRegionLocation(TableName tableName, byte [] row, boolean reload) throws IOException; @@ -183,6 +198,7 @@ interface ClusterConnection extends HConnection { * Clear any caches that pertain to server name sn. * @param sn A server name */ + @Override void clearCaches(final ServerName sn); /** @@ -191,6 +207,7 @@ interface ClusterConnection extends HConnection { * @return The shared instance. Never returns null. * @throws MasterNotRunningException */ + @Override @Deprecated MasterKeepAliveConnection getKeepAliveMasterService() throws MasterNotRunningException; @@ -199,12 +216,14 @@ interface ClusterConnection extends HConnection { * @param serverName * @return true if the server is known as dead, false otherwise. * @deprecated internal method, do not use thru HConnection */ + @Override @Deprecated boolean isDeadServer(ServerName serverName); /** * @return Nonce generator for this HConnection; may be null if disabled in configuration. */ + @Override public NonceGenerator getNonceGenerator(); /** diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/Connection.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/Connection.java new file mode 100644 index 0000000..382bc04 --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/Connection.java @@ -0,0 +1,125 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.client; + +import java.io.Closeable; +import java.io.IOException; +import java.util.concurrent.ExecutorService; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.Abortable; +import org.apache.hadoop.hbase.TableName; + +/** + * A cluster connection encapsulating lower level individual connections to actual servers and + * a connection to zookeeper. Connections are instantiated through the {@link ConnectionFactory} + * class. The lifecycle of the connection is managed by the caller, who has to {@link #close()} + * the connection to release the resources. + * + *

The connection object contains logic to find the master, locate regions out on the cluster, + * keeps a cache of locations and then knows how to re-calibrate after they move. The individual + * connections to servers, meta cache, zookeeper connection, etc are all shared by the + * {@link Table} and {@link Admin} instances obtained from this connection. + * + *

Connection creation is a heavy-weight operation. Connection implementations are thread-safe, + * so that the client can create a connection once, and share it with different threads. + * {@link Table} and {@link Admin} instances, on the other hand, are light-weight and are not + * thread-safe. Typically, a single connection per client application is instantiated and every + * thread will obtain its own Table instance. Caching or pooling of {@link Table} and {@link Admin} + * is not recommended. + * + *

This class replaces {@link HConnection}, which is now deprecated. + * @see ConnectionFactory + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public interface Connection extends Abortable, Closeable { + + /* + * Implementation notes: + * - Only allow new style of interfaces: + * -- All table names are passed as TableName. No more byte[] and string arguments + * -- Most of the classes with names H is deprecated in favor of non-H versions + * (Table, Connection vs HConnection, etc) + * -- Only real client-facing public methods are allowed + * - Connection should contain only createTable(), createAdmin() kind of general methods. listTables() kind + * of methods should be delegated to admin? TODO + */ + + /** + * @return Configuration instance being used by this HConnection instance. + */ + Configuration getConfiguration(); + + /** + * Retrieve an Table implementation for accessing a table. + * The returned Table is not thread safe, a new instance should be created for each using thread. + * This is a lightweight operation, pooling or caching of the returned Table + * is neither required nor desired. + * @param tableName the name of the table + * @return an Table to use for interactions with this table + */ + Table getTable(TableName tableName) throws IOException; + + /** + * Retrieve an Table implementation for accessing a table. + * The returned Table is not thread safe, a new instance should be created for each using thread. + * This is a lightweight operation, pooling or caching of the returned Table + * is neither required nor desired. + * @param tableName the name of the table + * @param pool The thread pool to use for batch operations, null to use a default pool. + * @return an Table to use for interactions with this table + */ + Table getTable(TableName tableName, ExecutorService pool) throws IOException; + + /** + * Retrieve an Table implementation for accessing a table. 
+ * The returned Table is not thread safe, a new instance should be created for each using thread. + * This is a lightweight operation, pooling or caching of the returned Table + * is neither required nor desired. + * @param tableName the name of the table + * @param conf configuration to pass to the table instance. If null, connection's configuration + * will be used. + * @param pool The thread pool to use for batch operations, null to use a default pool. + * @return an Table to use for interactions with this table + */ + Table getTable(TableName tableName, Configuration conf, ExecutorService pool) throws IOException; + + /** + * Retrieve an Admin implementation to administer an HBase cluster. + * The returned Admin is not guaranteed to be thread-safe. A new instance should be created for + * each using thread. This is a lightweight operation, pooling or caching of the returned + * Admin is not recommended. + * + * @return an Admin instance for cluster administration + */ + Admin getAdmin() throws IOException; + + @Override + public void close() throws IOException; + + /** + * Returns whether the connection is closed or not. 
+ * @return true if this connection is closed + */ + boolean isClosed(); + +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionAdapter.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionAdapter.java index 2762579..76c094d 100644 --- hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionAdapter.java +++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionAdapter.java @@ -111,6 +111,12 @@ class ConnectionAdapter implements ClusterConnection { } @Override + public Table getTable(TableName tableName, Configuration conf, ExecutorService pool) + throws IOException { + return wrappedConnection.getTable(tableName, conf, pool); + } + + @Override public Admin getAdmin() throws IOException { return wrappedConnection.getAdmin(); } diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java new file mode 100644 index 0000000..2e67e33 --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java @@ -0,0 +1,171 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.client; + +import java.io.IOException; +import java.util.concurrent.ExecutorService; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.security.User; +import org.apache.hadoop.hbase.security.UserProvider; + + +/** + * A non-instantiable class that manages creation of {@link Connection}s. + * Managing the lifecycle of the {@link Connection}s to the cluster is the responsibility of + * the caller. + * From this {@link Connection} {@link Table} implementations are retrieved + * with {@link Connection#getTable(org.apache.hadoop.hbase.TableName)}. Example: + * <pre>

+ * {@code
+ * Connection connection = ConnectionFactory.createConnection(config);
+ * Table table = connection.getTable(TableName.valueOf("table1"));
+ * try {
+ *   // Use the table as needed, for a single operation and a single thread
+ * } finally {
+ *   table.close();
+ *   connection.close();
+ * }
+ * 
+ * + * This class replaces {@link HConnectionManager}, which is now deprecated. + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public class ConnectionFactory { + + /** No public c.tors */ + protected ConnectionFactory() { + } + + /** + * Create a new Connection instance using the passed conf instance. Connection + * encapsulates all housekeeping for a connection to the cluster. All tables and interfaces + * created from returned connection share zookeeper connection, meta cache, and connections + * to region servers and masters. + * The caller is responsible for calling {@link Connection#close()} on the returned + * connection instance. + * + * Typical usage: + *
+   * Connection connection = ConnectionFactory.createConnection(conf);
+   * Table table = connection.getTable(TableName.valueOf("mytable"));
+   * table.get(...);
+   * ...
+   * table.close();
+   * connection.close();
+   * 
+ * + * @param conf configuration + * @return Connection object for conf + */ + public static Connection createConnection(Configuration conf) throws IOException { + return createConnection(conf, null, null); + } + + /** + * Create a new Connection instance using the passed conf instance. Connection + * encapsulates all housekeeping for a connection to the cluster. All tables and interfaces + * created from returned connection share zookeeper connection, meta cache, and connections + * to region servers and masters. + * The caller is responsible for calling {@link Connection#close()} on the returned + * connection instance. + * + * Typical usage: + *
+   * Connection connection = ConnectionFactory.createConnection(conf);
+   * Table table = connection.getTable(TableName.valueOf("mytable"));
+   * table.get(...);
+   * ...
+   * table.close();
+   * connection.close();
+   * 
+ * + * @param conf configuration + * @param pool the thread pool to use for batch operations + * @return Connection object for conf + */ + public static Connection createConnection(Configuration conf, ExecutorService pool) + throws IOException { + return createConnection(conf, pool, null); + } + + /** + * Create a new Connection instance using the passed conf instance. Connection + * encapsulates all housekeeping for a connection to the cluster. All tables and interfaces + * created from returned connection share zookeeper connection, meta cache, and connections + * to region servers and masters. + * The caller is responsible for calling {@link Connection#close()} on the returned + * connection instance. + * + * Typical usage: + *
+   * Connection connection = ConnectionFactory.createConnection(conf);
+   * Table table = connection.getTable(TableName.valueOf("mytable"));
+   * table.get(...);
+   * ...
+   * table.close();
+   * connection.close();
+   * 
+ * + * @param conf configuration + * @param user the user the connection is for + * @return Connection object for conf + */ + public static Connection createConnection(Configuration conf, User user) + throws IOException { + return createConnection(conf, null, user); + } + + /** + * Create a new Connection instance using the passed conf instance. Connection + * encapsulates all housekeeping for a connection to the cluster. All tables and interfaces + * created from returned connection share zookeeper connection, meta cache, and connections + * to region servers and masters. + * The caller is responsible for calling {@link Connection#close()} on the returned + * connection instance. + * + * Typical usage: + * <pre>
+   * Connection connection = ConnectionFactory.createConnection(conf);
+   * Table table = connection.getTable(TableName.valueOf("mytable"));
+   * table.get(...);
+   * ...
+   * table.close();
+   * connection.close();
+   * 
+ * + * @param conf configuration + * @param user the user the connection is for + * @param pool the thread pool to use for batch operations + * @return Connection object for conf + */ + public static Connection createConnection(Configuration conf, ExecutorService pool, User user) + throws IOException { + if (user == null) { + UserProvider provider = UserProvider.instantiate(conf); + user = provider.getCurrent(); + } + + return ConnectionManager.createConnection(conf, pool, user); + } + +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java index 89c788a..c636552 100644 --- hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java +++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java @@ -349,6 +349,7 @@ class ConnectionManager { * @param conf configuration whose identity is used to find {@link HConnection} instance. * @deprecated */ + @Deprecated public static void deleteConnection(Configuration conf) { deleteConnection(new HConnectionKey(conf), false); } @@ -360,6 +361,7 @@ class ConnectionManager { * @param connection * @deprecated */ + @Deprecated public static void deleteStaleConnection(HConnection connection) { deleteConnection(connection, true); } @@ -370,6 +372,7 @@ class ConnectionManager { * staleConnection to true. 
* @deprecated */ + @Deprecated public static void deleteAllConnections(boolean staleConnection) { synchronized (CONNECTION_INSTANCES) { Set connectionKeys = new HashSet(); @@ -631,6 +634,12 @@ class ConnectionManager { } @Override + public Table getTable(TableName tableName, Configuration conf, ExecutorService pool) + throws IOException { + return new HTable(tableName, this, conf, pool); + } + + @Override public Admin getAdmin() throws IOException { if (managed) { throw new IOException("The connection has to be unmanaged."); @@ -730,6 +739,7 @@ class ConnectionManager { * An identifier that will remain the same for a given connection. * @return */ + @Override public String toString(){ return "hconnection-0x" + Integer.toHexString(hashCode()); } @@ -1973,7 +1983,7 @@ class ConnectionManager { } }; } - + private static void release(MasterServiceState mss) { if (mss != null && mss.connection != null) { @@ -2553,7 +2563,7 @@ class ConnectionManager { * Look for an exception we know in the remote exception: * - hadoop.ipc wrapped exceptions * - nested exceptions - * + * * Looks for: RegionMovedException / RegionOpeningException / RegionTooBusyException * @return null if we didn't find the exception, the exception otherwise. 
*/ diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java index 759822a..b24af20 100644 --- hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java +++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java @@ -37,7 +37,6 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.ClusterStatus; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseIOException; @@ -148,15 +147,19 @@ import com.google.protobuf.ByteString; import com.google.protobuf.ServiceException; /** + *

HBaseAdmin is not a public interface anymore. Please obtain an {@link Admin} + * object through {@link Connection} instead. See {@link ConnectionFactory} for + * how to obtain a connection.

+ * * Provides an interface to manage HBase database table metadata + general * administrative functions. Use HBaseAdmin to create, drop, list, enable and * disable tables. Use it also to add and drop table column families. * - *

See {@link HTable} to add, update, and delete data from an individual table. + *

See {@link Table} to add, update, and delete data from an individual table. *

Currently HBaseAdmin instances are not expected to be long-lived. For * example, an HBaseAdmin instance will not ride over a Master restart. */ -@InterfaceAudience.Public +@InterfaceAudience.Private @InterfaceStability.Evolving public class HBaseAdmin implements Admin { private static final Log LOG = LogFactory.getLog(HBaseAdmin.class); @@ -193,6 +196,7 @@ public class HBaseAdmin implements Admin { this.cleanupConnectionOnClose = true; } + @Override public int getOperationTimeout() { return operationTimeout; } @@ -266,6 +270,7 @@ public class HBaseAdmin implements Admin { } /** @return HConnection used by this object. */ + @Override public HConnection getConnection() { return connection; } @@ -275,6 +280,7 @@ public class HBaseAdmin implements Admin { * @throws ZooKeeperConnectionException * @throws MasterNotRunningException */ + @Override public boolean isMasterRunning() throws MasterNotRunningException, ZooKeeperConnectionException { return connection.isMasterRunning(); @@ -285,6 +291,7 @@ public class HBaseAdmin implements Admin { * @return True if table exists already. 
* @throws IOException */ + @Override public boolean tableExists(final TableName tableName) throws IOException { boolean b = false; @@ -317,6 +324,7 @@ public class HBaseAdmin implements Admin { * @return - returns an array of HTableDescriptors * @throws IOException if a remote or network exception occurs */ + @Override public HTableDescriptor[] listTables() throws IOException { return this.connection.listTables(); } @@ -329,6 +337,7 @@ public class HBaseAdmin implements Admin { * @throws IOException if a remote or network exception occurs * @see #listTables() */ + @Override public HTableDescriptor[] listTables(Pattern pattern) throws IOException { List matched = new LinkedList(); HTableDescriptor[] tables = listTables(); @@ -348,6 +357,7 @@ public class HBaseAdmin implements Admin { * @throws IOException if a remote or network exception occurs * @see #listTables(java.util.regex.Pattern) */ + @Override public HTableDescriptor[] listTables(String regex) throws IOException { return listTables(Pattern.compile(regex)); } @@ -395,6 +405,7 @@ public class HBaseAdmin implements Admin { * @return TableName[] table names * @throws IOException if a remote or network exception occurs */ + @Override public TableName[] listTableNames() throws IOException { return this.connection.listTableNames(); } @@ -406,6 +417,7 @@ public class HBaseAdmin implements Admin { * @throws TableNotFoundException * @throws IOException if a remote or network exception occurs */ + @Override public HTableDescriptor getTableDescriptor(final TableName tableName) throws TableNotFoundException, IOException { return this.connection.getHTableDescriptor(tableName); @@ -437,6 +449,7 @@ public class HBaseAdmin implements Admin { * and attempt-at-creation). 
* @throws IOException if a remote or network exception occurs */ + @Override public void createTable(HTableDescriptor desc) throws IOException { createTable(desc, null); @@ -466,6 +479,7 @@ public class HBaseAdmin implements Admin { * and attempt-at-creation). * @throws IOException */ + @Override public void createTable(HTableDescriptor desc, byte [] startKey, byte [] endKey, int numRegions) throws IOException { @@ -502,6 +516,7 @@ public class HBaseAdmin implements Admin { * and attempt-at-creation). * @throws IOException */ + @Override public void createTable(final HTableDescriptor desc, byte [][] splitKeys) throws IOException { try { @@ -590,6 +605,7 @@ public class HBaseAdmin implements Admin { * and attempt-at-creation). * @throws IOException */ + @Override public void createTableAsync( final HTableDescriptor desc, final byte [][] splitKeys) throws IOException { @@ -639,6 +655,7 @@ public class HBaseAdmin implements Admin { * @param tableName name of table to delete * @throws IOException if a remote or network exception occurs */ + @Override public void deleteTable(final TableName tableName) throws IOException { boolean tableExists = true; @@ -733,6 +750,7 @@ public class HBaseAdmin implements Admin { * @see #deleteTables(java.util.regex.Pattern) * @see #deleteTable(java.lang.String) */ + @Override public HTableDescriptor[] deleteTables(String regex) throws IOException { return deleteTables(Pattern.compile(regex)); } @@ -748,6 +766,7 @@ public class HBaseAdmin implements Admin { * @return Table descriptors for tables that couldn't be deleted * @throws IOException */ + @Override public HTableDescriptor[] deleteTables(Pattern pattern) throws IOException { List failed = new LinkedList(); for (HTableDescriptor table : listTables(pattern)) { @@ -775,6 +794,7 @@ public class HBaseAdmin implements Admin { * @see #disableTable(byte[]) * @see #enableTableAsync(byte[]) */ + @Override public void enableTable(final TableName tableName) throws IOException { 
enableTableAsync(tableName); @@ -846,6 +866,7 @@ public class HBaseAdmin implements Admin { * @throws IOException * @since 0.90.0 */ + @Override public void enableTableAsync(final TableName tableName) throws IOException { TableName.isLegalFullyQualifiedTableName(tableName.getName()); @@ -882,6 +903,7 @@ public class HBaseAdmin implements Admin { * @see #enableTables(java.util.regex.Pattern) * @see #enableTable(java.lang.String) */ + @Override public HTableDescriptor[] enableTables(String regex) throws IOException { return enableTables(Pattern.compile(regex)); } @@ -896,6 +918,7 @@ public class HBaseAdmin implements Admin { * @param pattern The pattern to match table names against * @throws IOException */ + @Override public HTableDescriptor[] enableTables(Pattern pattern) throws IOException { List failed = new LinkedList(); for (HTableDescriptor table : listTables(pattern)) { @@ -924,6 +947,7 @@ public class HBaseAdmin implements Admin { * @see #isTableEnabled(byte[]) * @since 0.90.0 */ + @Override public void disableTableAsync(final TableName tableName) throws IOException { TableName.isLegalFullyQualifiedTableName(tableName.getName()); executeCallable(new MasterCallable(getConnection()) { @@ -956,6 +980,7 @@ public class HBaseAdmin implements Admin { * TableNotFoundException means the table doesn't exist. * TableNotEnabledException means the table isn't in enabled state. 
*/ + @Override public void disableTable(final TableName tableName) throws IOException { disableTableAsync(tableName); @@ -1009,6 +1034,7 @@ public class HBaseAdmin implements Admin { * @see #disableTables(java.util.regex.Pattern) * @see #disableTable(java.lang.String) */ + @Override public HTableDescriptor[] disableTables(String regex) throws IOException { return disableTables(Pattern.compile(regex)); } @@ -1024,6 +1050,7 @@ public class HBaseAdmin implements Admin { * @return Table descriptors for tables that couldn't be disabled * @throws IOException */ + @Override public HTableDescriptor[] disableTables(Pattern pattern) throws IOException { List failed = new LinkedList(); for (HTableDescriptor table : listTables(pattern)) { @@ -1054,6 +1081,7 @@ public class HBaseAdmin implements Admin { * @return true if table is on-line * @throws IOException if a remote or network exception occurs */ + @Override public boolean isTableEnabled(TableName tableName) throws IOException { checkTableExistence(tableName); return connection.isTableEnabled(tableName); @@ -1074,6 +1102,7 @@ public class HBaseAdmin implements Admin { * @return true if table is off-line * @throws IOException if a remote or network exception occurs */ + @Override public boolean isTableDisabled(TableName tableName) throws IOException { checkTableExistence(tableName); return connection.isTableDisabled(tableName); @@ -1092,6 +1121,7 @@ public class HBaseAdmin implements Admin { * @return true if all regions of the table are available * @throws IOException if a remote or network exception occurs */ + @Override public boolean isTableAvailable(TableName tableName) throws IOException { return connection.isTableAvailable(tableName); } @@ -1116,6 +1146,7 @@ public class HBaseAdmin implements Admin { * @throws IOException * if a remote or network excpetion occurs */ + @Override public boolean isTableAvailable(TableName tableName, byte[][] splitKeys) throws IOException { return connection.isTableAvailable(tableName, 
splitKeys); @@ -1142,6 +1173,7 @@ public class HBaseAdmin implements Admin { * @throws IOException * if a remote or network exception occurs */ + @Override public Pair getAlterStatus(final TableName tableName) throws IOException { return executeCallable(new MasterCallable>(getConnection()) { @@ -1169,6 +1201,7 @@ public class HBaseAdmin implements Admin { * @throws IOException * if a remote or network exception occurs */ + @Override public Pair getAlterStatus(final byte[] tableName) throws IOException { return getAlterStatus(TableName.valueOf(tableName)); @@ -1209,6 +1242,7 @@ public class HBaseAdmin implements Admin { * @param column column descriptor of column to be added * @throws IOException if a remote or network exception occurs */ + @Override public void addColumn(final TableName tableName, final HColumnDescriptor column) throws IOException { executeCallable(new MasterCallable(getConnection()) { @@ -1255,6 +1289,7 @@ public class HBaseAdmin implements Admin { * @param columnName name of column to be deleted * @throws IOException if a remote or network exception occurs */ + @Override public void deleteColumn(final TableName tableName, final byte [] columnName) throws IOException { executeCallable(new MasterCallable(getConnection()) { @@ -1303,6 +1338,7 @@ public class HBaseAdmin implements Admin { * @param descriptor new column descriptor to use * @throws IOException if a remote or network exception occurs */ + @Override public void modifyColumn(final TableName tableName, final HColumnDescriptor descriptor) throws IOException { executeCallable(new MasterCallable(getConnection()) { @@ -1323,6 +1359,7 @@ public class HBaseAdmin implements Admin { * the one currently in hbase:meta * @throws IOException if a remote or network exception occurs */ + @Override public void closeRegion(final String regionname, final String serverName) throws IOException { closeRegion(Bytes.toBytes(regionname), serverName); @@ -1338,6 +1375,7 @@ public class HBaseAdmin implements Admin 
{ * host187.example.com,60020,1289493121758 * @throws IOException if a remote or network exception occurs */ + @Override public void closeRegion(final byte [] regionname, final String serverName) throws IOException { CatalogTracker ct = getCatalogTracker(); @@ -1385,6 +1423,7 @@ public class HBaseAdmin implements Admin { * @throws IOException * if a remote or network exception occurs */ + @Override public boolean closeRegionWithEncodedRegionName(final String encodedRegionName, final String serverName) throws IOException { if (null == serverName || ("").equals(serverName.trim())) { @@ -1415,6 +1454,7 @@ public class HBaseAdmin implements Admin { * @param hri * @throws IOException */ + @Override public void closeRegion(final ServerName sn, final HRegionInfo hri) throws IOException { AdminService.BlockingInterface admin = this.connection.getAdmin(sn); @@ -1425,6 +1465,7 @@ public class HBaseAdmin implements Admin { /** * Get all the online regions on a region server. */ + @Override public List getOnlineRegions( final ServerName sn) throws IOException { AdminService.BlockingInterface admin = this.connection.getAdmin(sn); @@ -1500,6 +1541,16 @@ public class HBaseAdmin implements Admin { } } + @Override + public void flushRegion(byte[] regionName) throws IOException, InterruptedException { + flush(regionName); + } + + @Override + public void flushTable(TableName tableName) throws IOException, InterruptedException { + flush(tableName.getName()); // TODO: implement this properly + } + /** * Compact a table or an individual region. * Asynchronous operation. 
@@ -1670,6 +1721,31 @@ public class HBaseAdmin implements Admin { } } + @Override + public void compactRegion(byte[] regionName, boolean isMajor) throws IOException, + InterruptedException { + compactRegion(regionName, null, isMajor); + } + + @Override + public void compactRegion(byte[] regionName, byte[] columnFamily, boolean isMajor) + throws IOException, InterruptedException { + compact(regionName, columnFamily, isMajor); + } + + @Override + public void compactTable(TableName tableName, boolean isMajor) throws IOException, + InterruptedException { + compactTable(tableName, null, isMajor); + } + + @Override + public void compactTable(TableName tableName, byte[] columnFamily, boolean isMajor) + throws IOException, InterruptedException { + // TODO: implement compact region and table separately + compact(tableName.getName(), columnFamily, isMajor); + } + /** * Move the region r to dest. * @param encodedRegionName The encoded region name; i.e. the hash that makes @@ -1685,6 +1761,7 @@ public class HBaseAdmin implements Admin { * @throws ZooKeeperConnectionException * @throws MasterNotRunningException */ + @Override public void move(final byte [] encodedRegionName, final byte [] destServerName) throws HBaseIOException, MasterNotRunningException, ZooKeeperConnectionException { MasterKeepAliveConnection stub = connection.getKeepAliveMasterService(); @@ -1712,6 +1789,7 @@ public class HBaseAdmin implements Admin { * @throws ZooKeeperConnectionException * @throws IOException */ + @Override public void assign(final byte[] regionName) throws MasterNotRunningException, ZooKeeperConnectionException, IOException { final byte[] toBeAssigned = getRegionName(regionName); @@ -1740,6 +1818,7 @@ public class HBaseAdmin implements Admin { * @throws ZooKeeperConnectionException * @throws IOException */ + @Override public void unassign(final byte [] regionName, final boolean force) throws MasterNotRunningException, ZooKeeperConnectionException, IOException { final byte[] toBeUnassigned 
= getRegionName(regionName); @@ -1766,6 +1845,7 @@ public class HBaseAdmin implements Admin { * Region to offline. * @throws IOException */ + @Override public void offline(final byte [] regionName) throws IOException { MasterKeepAliveConnection master = connection.getKeepAliveMasterService(); @@ -1784,6 +1864,7 @@ public class HBaseAdmin implements Admin { * @param synchronous If true, it waits until current balance() call, if outstanding, to return. * @return Previous balancer value */ + @Override public boolean setBalancerRunning(final boolean on, final boolean synchronous) throws MasterNotRunningException, ZooKeeperConnectionException { MasterKeepAliveConnection stub = connection.getKeepAliveMasterService(); @@ -1814,6 +1895,7 @@ public class HBaseAdmin implements Admin { * logs. * @return True if balancer ran, false otherwise. */ + @Override public boolean balancer() throws MasterNotRunningException, ZooKeeperConnectionException, ServiceException { MasterKeepAliveConnection stub = connection.getKeepAliveMasterService(); @@ -1831,6 +1913,7 @@ public class HBaseAdmin implements Admin { * @throws ServiceException * @throws MasterNotRunningException */ + @Override public boolean enableCatalogJanitor(boolean enable) throws ServiceException, MasterNotRunningException { MasterKeepAliveConnection stub = connection.getKeepAliveMasterService(); @@ -1848,6 +1931,7 @@ public class HBaseAdmin implements Admin { * @throws ServiceException * @throws MasterNotRunningException */ + @Override public int runCatalogScan() throws ServiceException, MasterNotRunningException { MasterKeepAliveConnection stub = connection.getKeepAliveMasterService(); try { @@ -1863,6 +1947,7 @@ public class HBaseAdmin implements Admin { * @throws ServiceException * @throws org.apache.hadoop.hbase.MasterNotRunningException */ + @Override public boolean isCatalogJanitorEnabled() throws ServiceException, MasterNotRunningException { MasterKeepAliveConnection stub = connection.getKeepAliveMasterService(); 
try { @@ -1881,6 +1966,7 @@ public class HBaseAdmin implements Admin { * two adjacent regions * @throws IOException */ + @Override public void mergeRegions(final byte[] encodedNameOfRegionA, final byte[] encodedNameOfRegionB, final boolean forcible) throws IOException { @@ -1916,6 +2002,7 @@ public class HBaseAdmin implements Admin { * @throws IOException if a remote or network exception occurs * @throws InterruptedException */ + @Override public void split(final String tableNameOrRegionName) throws IOException, InterruptedException { split(Bytes.toBytes(tableNameOrRegionName)); @@ -1929,11 +2016,13 @@ public class HBaseAdmin implements Admin { * @throws IOException if a remote or network exception occurs * @throws InterruptedException */ + @Override public void split(final byte[] tableNameOrRegionName) throws IOException, InterruptedException { split(tableNameOrRegionName, null); } + @Override public void split(final String tableNameOrRegionName, final String splitPoint) throws IOException, InterruptedException { split(Bytes.toBytes(tableNameOrRegionName), Bytes.toBytes(splitPoint)); @@ -1948,6 +2037,7 @@ public class HBaseAdmin implements Admin { * @throws IOException if a remote or network exception occurs * @throws InterruptedException interrupt exception occurred */ + @Override public void split(final byte[] tableNameOrRegionName, final byte [] splitPoint) throws IOException, InterruptedException { CatalogTracker ct = getCatalogTracker(); @@ -2002,6 +2092,7 @@ public class HBaseAdmin implements Admin { * @param htd modified description of the table * @throws IOException if a remote or network exception occurs */ + @Override public void modifyTable(final TableName tableName, final HTableDescriptor htd) throws IOException { if (!tableName.equals(htd.getTableName())) { @@ -2121,6 +2212,7 @@ public class HBaseAdmin implements Admin { * Shuts down the HBase cluster * @throws IOException if a remote or network exception occurs */ + @Override public synchronized void 
shutdown() throws IOException { executeCallable(new MasterCallable(getConnection()) { @Override @@ -2137,6 +2229,7 @@ public class HBaseAdmin implements Admin { * @see #shutdown() * @throws IOException if a remote or network exception occurs */ + @Override public synchronized void stopMaster() throws IOException { executeCallable(new MasterCallable(getConnection()) { @Override @@ -2153,6 +2246,7 @@ public class HBaseAdmin implements Admin { * example.org:1234 * @throws IOException if a remote or network exception occurs */ + @Override public synchronized void stopRegionServer(final String hostnamePort) throws IOException { String hostname = Addressing.parseHostname(hostnamePort); @@ -2173,6 +2267,7 @@ public class HBaseAdmin implements Admin { * @return cluster status * @throws IOException if a remote or network exception occurs */ + @Override public ClusterStatus getClusterStatus() throws IOException { return executeCallable(new MasterCallable(getConnection()) { @Override @@ -2192,6 +2287,7 @@ public class HBaseAdmin implements Admin { /** * @return Configuration used by the instance. 
*/ + @Override public Configuration getConfiguration() { return this.conf; } @@ -2201,6 +2297,7 @@ public class HBaseAdmin implements Admin { * @param descriptor descriptor which describes the new namespace * @throws IOException */ + @Override public void createNamespace(final NamespaceDescriptor descriptor) throws IOException { executeCallable(new MasterCallable(getConnection()) { @Override @@ -2219,6 +2316,7 @@ public class HBaseAdmin implements Admin { * @param descriptor descriptor which describes the new namespace * @throws IOException */ + @Override public void modifyNamespace(final NamespaceDescriptor descriptor) throws IOException { executeCallable(new MasterCallable(getConnection()) { @Override @@ -2235,6 +2333,7 @@ public class HBaseAdmin implements Admin { * @param name namespace name * @throws IOException */ + @Override public void deleteNamespace(final String name) throws IOException { executeCallable(new MasterCallable(getConnection()) { @Override @@ -2252,6 +2351,7 @@ public class HBaseAdmin implements Admin { * @return A descriptor * @throws IOException */ + @Override public NamespaceDescriptor getNamespaceDescriptor(final String name) throws IOException { return executeCallable(new MasterCallable(getConnection()) { @@ -2269,6 +2369,7 @@ public class HBaseAdmin implements Admin { * @return List of descriptors * @throws IOException */ + @Override public NamespaceDescriptor[] listNamespaceDescriptors() throws IOException { return executeCallable(new MasterCallable(getConnection()) { @@ -2292,6 +2393,7 @@ public class HBaseAdmin implements Admin { * @return A descriptor * @throws IOException */ + @Override public HTableDescriptor[] listTableDescriptorsByNamespace(final String name) throws IOException { return executeCallable(new MasterCallable(getConnection()) { @@ -2316,6 +2418,7 @@ public class HBaseAdmin implements Admin { * @return The list of table names in the namespace * @throws IOException */ + @Override public TableName[] 
listTableNamesByNamespace(final String name) throws IOException { return executeCallable(new MasterCallable(getConnection()) { @@ -2392,6 +2495,7 @@ public class HBaseAdmin implements Admin { * @return Ordered list of {@link HRegionInfo}. * @throws IOException */ + @Override public List getTableRegions(final TableName tableName) throws IOException { CatalogTracker ct = getCatalogTracker(); @@ -2423,6 +2527,7 @@ public class HBaseAdmin implements Admin { * @return HTD[] the tableDescriptor * @throws IOException if a remote or network exception occurs */ + @Override public HTableDescriptor[] getTableDescriptorsByTableName(List tableNames) throws IOException { return this.connection.getHTableDescriptorsByTableName(tableNames); @@ -2434,6 +2539,7 @@ public class HBaseAdmin implements Admin { * @return HTD[] the tableDescriptor * @throws IOException if a remote or network exception occurs */ + @Override public HTableDescriptor[] getTableDescriptors(List names) throws IOException { List tableNames = new ArrayList(names.size()); @@ -2456,7 +2562,8 @@ public class HBaseAdmin implements Admin { * @throws IOException if a remote or network exception occurs * @throws FailedLogCloseException */ - public synchronized byte[][] rollHLogWriter(String serverName) + @Override +public synchronized byte[][] rollHLogWriter(String serverName) throws IOException, FailedLogCloseException { ServerName sn = ServerName.valueOf(serverName); AdminService.BlockingInterface admin = this.connection.getAdmin(sn); @@ -2475,6 +2582,7 @@ public class HBaseAdmin implements Admin { } } + @Override public String[] getMasterCoprocessors() { try { return getClusterStatus().getMasterCoprocessors(); @@ -2493,6 +2601,7 @@ public class HBaseAdmin implements Admin { * @throws InterruptedException * @return the current compaction state */ + @Override public CompactionState getCompactionState(final String tableNameOrRegionName) throws IOException, InterruptedException { return 
getCompactionState(Bytes.toBytes(tableNameOrRegionName)); @@ -2507,6 +2616,7 @@ public class HBaseAdmin implements Admin { * @throws InterruptedException * @return the current compaction state */ + @Override public CompactionState getCompactionState(final byte[] tableNameOrRegionName) throws IOException, InterruptedException { CompactionState state = CompactionState.NONE; @@ -2599,6 +2709,7 @@ public class HBaseAdmin implements Admin { * @throws SnapshotCreationException if snapshot creation failed * @throws IllegalArgumentException if the snapshot request is formatted incorrectly */ + @Override public void snapshot(final String snapshotName, final TableName tableName) throws IOException, SnapshotCreationException, IllegalArgumentException { @@ -2628,6 +2739,7 @@ public class HBaseAdmin implements Admin { * @throws SnapshotCreationException if snapshot creation failed * @throws IllegalArgumentException if the snapshot request is formatted incorrectly */ + @Override public void snapshot(final byte[] snapshotName, final TableName tableName) throws IOException, SnapshotCreationException, IllegalArgumentException { @@ -2659,6 +2771,7 @@ public class HBaseAdmin implements Admin { * @throws SnapshotCreationException if snapshot creation failed * @throws IllegalArgumentException if the snapshot request is formatted incorrectly */ + @Override public void snapshot(final String snapshotName, final TableName tableName, SnapshotDescription.Type type) throws IOException, SnapshotCreationException, @@ -2705,6 +2818,7 @@ public class HBaseAdmin implements Admin { * @throws SnapshotCreationException if snapshot failed to be taken * @throws IllegalArgumentException if the snapshot request is formatted incorrectly */ + @Override public void snapshot(SnapshotDescription snapshot) throws IOException, SnapshotCreationException, IllegalArgumentException { // actually take the snapshot @@ -2755,6 +2869,7 @@ public class HBaseAdmin implements Admin { * @throws SnapshotCreationException if 
snapshot creation failed * @throws IllegalArgumentException if the snapshot request is formatted incorrectly */ + @Override public SnapshotResponse takeSnapshotAsync(SnapshotDescription snapshot) throws IOException, SnapshotCreationException { ClientSnapshotDescriptionUtils.assertSnapshotRequestIsValid(snapshot); @@ -2789,6 +2904,7 @@ public class HBaseAdmin implements Admin { * @throws HBaseSnapshotException if the snapshot failed * @throws UnknownSnapshotException if the requested snapshot is unknown */ + @Override public boolean isSnapshotFinished(final SnapshotDescription snapshot) throws IOException, HBaseSnapshotException, UnknownSnapshotException { @@ -2814,6 +2930,7 @@ public class HBaseAdmin implements Admin { * @throws RestoreSnapshotException if snapshot failed to be restored * @throws IllegalArgumentException if the restore request is formatted incorrectly */ + @Override public void restoreSnapshot(final byte[] snapshotName) throws IOException, RestoreSnapshotException { restoreSnapshot(Bytes.toString(snapshotName)); @@ -2832,6 +2949,7 @@ public class HBaseAdmin implements Admin { * @throws RestoreSnapshotException if snapshot failed to be restored * @throws IllegalArgumentException if the restore request is formatted incorrectly */ + @Override public void restoreSnapshot(final String snapshotName) throws IOException, RestoreSnapshotException { boolean takeFailSafeSnapshot = @@ -2855,6 +2973,7 @@ public class HBaseAdmin implements Admin { * @throws RestoreSnapshotException if snapshot failed to be restored * @throws IllegalArgumentException if the restore request is formatted incorrectly */ + @Override public void restoreSnapshot(final byte[] snapshotName, final boolean takeFailSafeSnapshot) throws IOException, RestoreSnapshotException { restoreSnapshot(Bytes.toString(snapshotName), takeFailSafeSnapshot); @@ -2876,6 +2995,7 @@ public class HBaseAdmin implements Admin { * @throws RestoreSnapshotException if snapshot failed to be restored * @throws 
IllegalArgumentException if the restore request is formatted incorrectly */ + @Override public void restoreSnapshot(final String snapshotName, boolean takeFailSafeSnapshot) throws IOException, RestoreSnapshotException { TableName tableName = null; @@ -2979,6 +3099,7 @@ public class HBaseAdmin implements Admin { * @throws RestoreSnapshotException if snapshot failed to be cloned * @throws IllegalArgumentException if the specified table has not a valid name */ + @Override public void cloneSnapshot(final byte[] snapshotName, final TableName tableName) throws IOException, TableExistsException, RestoreSnapshotException, InterruptedException { cloneSnapshot(Bytes.toString(snapshotName), tableName); @@ -3011,6 +3132,7 @@ public class HBaseAdmin implements Admin { * @throws RestoreSnapshotException if snapshot failed to be cloned * @throws IllegalArgumentException if the specified table has not a valid name */ + @Override public void cloneSnapshot(final String snapshotName, final TableName tableName) throws IOException, TableExistsException, RestoreSnapshotException, InterruptedException { if (tableExists(tableName)) { @@ -3029,6 +3151,7 @@ public class HBaseAdmin implements Admin { * optional. 
* @param props Property/Value pairs of properties passing to the procedure */ + @Override public void execProcedure(String signature, String instance, Map props) throws IOException { ProcedureDescription.Builder builder = ProcedureDescription.newBuilder(); @@ -3095,6 +3218,7 @@ public class HBaseAdmin implements Admin { * @return true if the specified procedure is finished successfully, false if it is still running * @throws IOException if the specified procedure finished with error */ + @Override public boolean isProcedureFinished(String signature, String instance, Map props) throws IOException { final ProcedureDescription.Builder builder = ProcedureDescription.newBuilder(); @@ -3195,6 +3319,7 @@ public class HBaseAdmin implements Admin { * @return a list of snapshot descriptors for completed snapshots * @throws IOException if a network error occurs */ + @Override public List listSnapshots() throws IOException { return executeCallable(new MasterCallable>(getConnection()) { @Override @@ -3212,6 +3337,7 @@ public class HBaseAdmin implements Admin { * @return - returns a List of SnapshotDescription * @throws IOException if a remote or network exception occurs */ + @Override public List listSnapshots(String regex) throws IOException { return listSnapshots(Pattern.compile(regex)); } @@ -3223,6 +3349,7 @@ public class HBaseAdmin implements Admin { * @return - returns a List of SnapshotDescription * @throws IOException if a remote or network exception occurs */ + @Override public List listSnapshots(Pattern pattern) throws IOException { List matched = new LinkedList(); List snapshots = listSnapshots(); @@ -3239,6 +3366,7 @@ public class HBaseAdmin implements Admin { * @param snapshotName name of the snapshot * @throws IOException if a remote or network exception occurs */ + @Override public void deleteSnapshot(final byte[] snapshotName) throws IOException { deleteSnapshot(Bytes.toString(snapshotName)); } @@ -3248,6 +3376,7 @@ public class HBaseAdmin implements Admin { * 
@param snapshotName name of the snapshot * @throws IOException if a remote or network exception occurs */ + @Override public void deleteSnapshot(final String snapshotName) throws IOException { // make sure the snapshot is possibly valid TableName.isLegalFullyQualifiedTableName(Bytes.toBytes(snapshotName)); @@ -3268,6 +3397,7 @@ public class HBaseAdmin implements Admin { * @param regex The regular expression to match against * @throws IOException if a remote or network exception occurs */ + @Override public void deleteSnapshots(final String regex) throws IOException { deleteSnapshots(Pattern.compile(regex)); } @@ -3277,6 +3407,7 @@ public class HBaseAdmin implements Admin { * @param pattern pattern for names of the snapshot to match * @throws IOException if a remote or network exception occurs */ + @Override public void deleteSnapshots(final Pattern pattern) throws IOException { List snapshots = listSnapshots(pattern); for (final SnapshotDescription snapshot : snapshots) { @@ -3361,6 +3492,7 @@ public class HBaseAdmin implements Admin { * * @return A MasterCoprocessorRpcChannel instance */ + @Override public CoprocessorRpcChannel coprocessorService() { return new MasterCoprocessorRpcChannel(connection); } diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnection.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnection.java index 0305821..7713c36 100644 --- hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnection.java +++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnection.java @@ -18,7 +18,6 @@ */ package org.apache.hadoop.hbase.client; -import java.io.Closeable; import java.io.IOException; import java.util.List; import java.util.concurrent.ExecutorService; @@ -26,7 +25,6 @@ import java.util.concurrent.ExecutorService; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; -import 
org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.HTableDescriptor; @@ -44,7 +42,7 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MasterService; * keeps a cache of locations and then knows how to re-calibrate after they move. You need one * of these to talk to your HBase cluster. {@link HConnectionManager} manages instances of this * class. See it for how to get one of these. - * + * *

This is NOT a connection to a particular server but to ALL servers in the cluster. Individual * connections are managed at a lower level. * @@ -57,10 +55,12 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MasterService; * HConnections awkward. See {@link HConnectionManager} for cleanup discussion. * * @see HConnectionManager + * @deprecated in favor of {@link Connection} and {@link ConnectionFactory} */ @InterfaceAudience.Public @InterfaceStability.Stable -public interface HConnection extends Abortable, Closeable { +@Deprecated +public interface HConnection extends Connection { /** * Key for configuration in Configuration whose value is the class we implement making a * new HConnection instance. @@ -70,6 +70,7 @@ public interface HConnection extends Abortable, Closeable { /** * @return Configuration instance being used by this HConnection instance. */ + @Override Configuration getConfiguration(); /** @@ -109,6 +110,7 @@ public interface HConnection extends Abortable, Closeable { * @param tableName * @return an HTable to use for interactions with this table */ + @Override public HTableInterface getTable(TableName tableName) throws IOException; /** @@ -151,6 +153,7 @@ public interface HConnection extends Abortable, Closeable { * @param pool The thread pool to use for batch operations, null to use a default pool. * @return an HTable to use for interactions with this table */ + @Override public HTableInterface getTable(TableName tableName, ExecutorService pool) throws IOException; /** @@ -162,6 +165,7 @@ public interface HConnection extends Abortable, Closeable { * * @return an Admin instance for cluster administration */ + @Override Admin getAdmin() throws IOException; /** @return - true if the master server is running @@ -412,6 +416,7 @@ public interface HConnection extends Abortable, Closeable { * @throws IOException if a remote or network exception occurs * @deprecated You can pass master flag but nothing special is done. 
*/ + @Deprecated AdminService.BlockingInterface getAdmin(final ServerName serverName, boolean getMaster) throws IOException; @@ -506,6 +511,7 @@ public interface HConnection extends Abortable, Closeable { * @throws IOException if a remote or network exception occurs * @deprecated This method will be changed from public to package protected. */ + @Deprecated int getCurrentNrHRS() throws IOException; /** @@ -522,6 +528,7 @@ public interface HConnection extends Abortable, Closeable { /** * @return true if this connection is closed */ + @Override boolean isClosed(); diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java index 7436b85..a074c63 100644 --- hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java +++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java @@ -18,83 +18,14 @@ */ package org.apache.hadoop.hbase.client; -import java.io.Closeable; import java.io.IOException; -import java.io.InterruptedIOException; -import java.lang.reflect.Constructor; -import java.lang.reflect.UndeclaredThrowableException; -import java.net.SocketException; -import java.util.ArrayList; -import java.util.Date; -import java.util.HashSet; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; -import java.util.Map.Entry; -import java.util.NavigableMap; -import java.util.Set; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ConcurrentMap; -import java.util.concurrent.ConcurrentSkipListMap; -import java.util.concurrent.ConcurrentSkipListSet; -import java.util.concurrent.CopyOnWriteArraySet; import java.util.concurrent.ExecutorService; -import java.util.concurrent.LinkedBlockingQueue; -import java.util.concurrent.ThreadPoolExecutor; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; -import 
java.util.concurrent.atomic.AtomicInteger; - import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.Chore; -import org.apache.hadoop.hbase.HBaseConfiguration; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.HRegionLocation; -import org.apache.hadoop.hbase.HTableDescriptor; -import org.apache.hadoop.hbase.MasterNotRunningException; -import org.apache.hadoop.hbase.RegionTooBusyException; -import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.Stoppable; -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.TableNotEnabledException; -import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.ZooKeeperConnectionException; -import org.apache.hadoop.hbase.client.AsyncProcess.AsyncRequestFuture; -import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitor; -import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitorBase; -import org.apache.hadoop.hbase.client.coprocessor.Batch; -import org.apache.hadoop.hbase.exceptions.RegionMovedException; -import org.apache.hadoop.hbase.exceptions.RegionOpeningException; -import org.apache.hadoop.hbase.ipc.RpcClient; -import org.apache.hadoop.hbase.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.protobuf.RequestConverter; -import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService; -import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ClientService; -import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest; -import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse; -import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.*; -import 
org.apache.hadoop.hbase.regionserver.RegionServerStoppedException; import org.apache.hadoop.hbase.security.User; -import org.apache.hadoop.hbase.security.UserProvider; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; -import org.apache.hadoop.hbase.util.ExceptionUtil; -import org.apache.hadoop.hbase.util.Threads; -import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker; -import org.apache.hadoop.hbase.zookeeper.ZKUtil; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; -import org.apache.hadoop.ipc.RemoteException; -import org.apache.zookeeper.KeeperException; - -import com.google.common.annotations.VisibleForTesting; -import com.google.protobuf.BlockingRpcChannel; -import com.google.protobuf.RpcController; -import com.google.protobuf.ServiceException; /** * A non-instantiable class that manages creation of {@link HConnection}s. @@ -157,11 +88,13 @@ import com.google.protobuf.ServiceException; * were problematic for clients of HConnection that wanted to register their * own shutdown hooks so we removed ours though this shifts the onus for * cleanup to the client. + * @deprecated Please use ConnectionFactory instead */ @SuppressWarnings("serial") @InterfaceAudience.Public @InterfaceStability.Evolving -public class HConnectionManager { +@Deprecated +public class HConnectionManager extends ConnectionFactory { @Deprecated public static final String RETRIES_BY_SERVER_KEY = @@ -187,6 +120,7 @@ public class HConnectionManager { * @return HConnection object for conf * @throws ZooKeeperConnectionException */ + @Deprecated public static HConnection getConnection(final Configuration conf) throws IOException { return ConnectionManager.getConnectionInternal(conf); } @@ -312,6 +246,7 @@ public class HConnectionManager { * @param conf configuration whose identity is used to find {@link HConnection} instance. 
* @deprecated */ + @Deprecated public static void deleteConnection(Configuration conf) { ConnectionManager.deleteConnection(conf); } @@ -323,6 +258,7 @@ public class HConnectionManager { * @param connection * @deprecated */ + @Deprecated public static void deleteStaleConnection(HConnection connection) { ConnectionManager.deleteStaleConnection(connection); } @@ -333,6 +269,7 @@ public class HConnectionManager { * staleConnection to true. * @deprecated */ + @Deprecated public static void deleteAllConnections(boolean staleConnection) { ConnectionManager.deleteAllConnections(staleConnection); } diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java index 49d9354..69e2b0f 100644 --- hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java +++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java @@ -43,7 +43,6 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; @@ -81,12 +80,15 @@ import com.google.protobuf.ServiceException; import com.google.common.annotations.VisibleForTesting; /** + *

HTable is not a public interface anymore. Please obtain a {@link Table} + * object through {@link Connection} instead. See {@link ConnectionFactory} for + * how to obtain a connection.

*

Used to communicate with a single HBase table. An implementation of * {@link HTableInterface}. Instances of this class can be constructed directly but it is * encouraged that users get instances via {@link HConnection} and {@link HConnectionManager}. * See {@link HConnectionManager} class comment for an example. * - *

This class is not thread safe for reads nor write. + *

This class is not thread safe for reads and writes. * *

In case of writes (Put, Delete), the underlying write buffer can * be corrupted if multiple threads contend over a single HTable instance. @@ -121,9 +123,9 @@ import com.google.common.annotations.VisibleForTesting; * @see HConnection * @see HConnectionManager */ -@InterfaceAudience.Public +@InterfaceAudience.Private @InterfaceStability.Stable -public class HTable implements HTableInterface { +public class HTable implements HTableInterface, Table { private static final Log LOG = LogFactory.getLog(HTable.class); protected ClusterConnection connection; private final TableName tableName; @@ -156,7 +158,10 @@ public class HTable implements HTableInterface { * @param conf Configuration object to use. * @param tableName Name of the table. * @throws IOException if a remote or network exception occurs + * @deprecated Constructing HTable objects manually has been deprecated. Please use + * {@link Connection} to instantiate a {@link Table} instead. */ + @Deprecated public HTable(Configuration conf, final String tableName) throws IOException { this(conf, TableName.valueOf(tableName)); @@ -171,7 +176,10 @@ public class HTable implements HTableInterface { * @param conf Configuration object to use. * @param tableName Name of the table. * @throws IOException if a remote or network exception occurs + * @deprecated Constructing HTable objects manually has been deprecated. Please use + * {@link Connection} to instantiate a {@link Table} instead. */ + @Deprecated public HTable(Configuration conf, final byte[] tableName) throws IOException { this(conf, TableName.valueOf(tableName)); @@ -188,7 +196,10 @@ public class HTable implements HTableInterface { * @param conf Configuration object to use. * @param tableName table name pojo * @throws IOException if a remote or network exception occurs + * @deprecated Constructing HTable objects manually has been deprecated. Please use + * {@link Connection} to instantiate a {@link Table} instead. 
*/ + @Deprecated public HTable(Configuration conf, final TableName tableName) throws IOException { this.tableName = tableName; @@ -253,7 +264,10 @@ public class HTable implements HTableInterface { * @param tableName Name of the table. * @param pool ExecutorService to be used. * @throws IOException if a remote or network exception occurs + * @deprecated Constructing HTable objects manually has been deprecated. Please use + * {@link Connection} to instantiate a {@link Table} instead. */ + @Deprecated public HTable(Configuration conf, final byte[] tableName, final ExecutorService pool) throws IOException { this(conf, TableName.valueOf(tableName), pool); @@ -270,7 +284,10 @@ public class HTable implements HTableInterface { * @param tableName Name of the table. * @param pool ExecutorService to be used. * @throws IOException if a remote or network exception occurs + * @deprecated Constructing HTable objects manually has been deprecated. Please use + * {@link Connection} to instantiate a {@link Table} instead. */ + @Deprecated public HTable(Configuration conf, final TableName tableName, final ExecutorService pool) throws IOException { this.connection = ConnectionManager.getConnectionInternal(conf); @@ -318,17 +335,37 @@ public class HTable implements HTableInterface { * @param connection HConnection to be used. * @param pool ExecutorService to be used. * @throws IOException if a remote or network exception occurs + * @deprecated Constructing HTable objects manually has been deprecated. Please use + * {@link Connection} to instantiate a {@link Table} instead. */ + @Deprecated @InterfaceAudience.Private public HTable(TableName tableName, final ClusterConnection connection, final ExecutorService pool) throws IOException { + this(tableName, connection, connection.getConfiguration(), pool); + } + + /** + * Creates an object to access a HBase table. + * Shares zookeeper connection and other resources with other HTable instances + * created with the same connection instance. 
+ * Visible only for HTableWrapper which is in different package. + * Should not be used by exernal code. + * @param tableName Name of the table. + * @param connection HConnection to be used. + * @param conf configuration for the table + * @param pool ExecutorService to be used. + * @throws IOException if a remote or network exception occurs + */ + HTable(final TableName tableName, final ClusterConnection connection, + final Configuration conf, final ExecutorService pool) throws IOException { if (connection == null || connection.isClosed()) { throw new IllegalArgumentException("Connection is null or closed."); } this.tableName = tableName; this.cleanupPoolOnClose = this.cleanupConnectionOnClose = false; this.connection = connection; - this.configuration = connection.getConfiguration(); + this.configuration = conf; this.pool = pool; this.finishSetup(); @@ -724,7 +761,8 @@ public class HTable implements HTableInterface { throws IOException { RegionServerCallable callable = new RegionServerCallable(this.connection, tableName, row) { - public Result call(int callTimeout) throws IOException { + @Override + public Result call(int callTimeout) throws IOException { PayloadCarryingRpcController controller = new PayloadCarryingRpcController(); controller.setPriority(tableName); controller.setCallTimeout(callTimeout); @@ -798,6 +836,7 @@ public class HTable implements HTableInterface { public Result get(final Get get) throws IOException { RegionServerCallable callable = new RegionServerCallable(this.connection, getName(), get.getRow()) { + @Override public Result call(int callTimeout) throws IOException { ClientProtos.GetRequest request = RequestConverter.buildGetRequest(getLocation().getRegionInfo().getRegionName(), get); @@ -859,6 +898,7 @@ public class HTable implements HTableInterface { * @deprecated If any exception is thrown by one of the actions, there is no way to * retrieve the partially executed results. Use {@link #batch(List, Object[])} instead. 
*/ + @Deprecated @Override public Object[] batch(final List actions) throws InterruptedException, IOException { @@ -884,6 +924,7 @@ public class HTable implements HTableInterface { * {@link #batchCallback(List, Object[], org.apache.hadoop.hbase.client.coprocessor.Batch.Callback)} * instead. */ + @Deprecated @Override public Object[] batchCallback( final List actions, final Batch.Callback callback) throws IOException, @@ -901,6 +942,7 @@ public class HTable implements HTableInterface { throws IOException { RegionServerCallable callable = new RegionServerCallable(connection, tableName, delete.getRow()) { + @Override public Boolean call(int callTimeout) throws IOException { PayloadCarryingRpcController controller = new PayloadCarryingRpcController(); controller.setPriority(tableName); @@ -1041,6 +1083,7 @@ public class HTable implements HTableInterface { public void mutateRow(final RowMutations rm) throws IOException { RegionServerCallable callable = new RegionServerCallable(connection, getName(), rm.getRow()) { + @Override public Void call(int callTimeout) throws IOException { PayloadCarryingRpcController controller = new PayloadCarryingRpcController(); controller.setPriority(tableName); @@ -1075,6 +1118,7 @@ public class HTable implements HTableInterface { final long nonceGroup = ng.getNonceGroup(), nonce = ng.newNonce(); RegionServerCallable callable = new RegionServerCallable(this.connection, getName(), append.getRow()) { + @Override public Result call(int callTimeout) throws IOException { PayloadCarryingRpcController controller = new PayloadCarryingRpcController(); controller.setPriority(getTableName()); @@ -1106,6 +1150,7 @@ public class HTable implements HTableInterface { final long nonceGroup = ng.getNonceGroup(), nonce = ng.newNonce(); RegionServerCallable callable = new RegionServerCallable(this.connection, getName(), increment.getRow()) { + @Override public Result call(int callTimeout) throws IOException { PayloadCarryingRpcController controller = new 
PayloadCarryingRpcController(); controller.setPriority(getTableName()); @@ -1169,6 +1214,7 @@ public class HTable implements HTableInterface { final long nonceGroup = ng.getNonceGroup(), nonce = ng.newNonce(); RegionServerCallable callable = new RegionServerCallable(connection, getName(), row) { + @Override public Long call(int callTimeout) throws IOException { PayloadCarryingRpcController controller = new PayloadCarryingRpcController(); controller.setPriority(getTableName()); @@ -1199,6 +1245,7 @@ public class HTable implements HTableInterface { throws IOException { RegionServerCallable callable = new RegionServerCallable(connection, getName(), row) { + @Override public Boolean call(int callTimeout) throws IOException { PayloadCarryingRpcController controller = new PayloadCarryingRpcController(); controller.setPriority(tableName); @@ -1227,6 +1274,7 @@ public class HTable implements HTableInterface { throws IOException { RegionServerCallable callable = new RegionServerCallable(connection, getName(), row) { + @Override public Boolean call(int callTimeout) throws IOException { PayloadCarryingRpcController controller = new PayloadCarryingRpcController(); controller.setPriority(tableName); @@ -1256,6 +1304,7 @@ public class HTable implements HTableInterface { throws IOException { RegionServerCallable callable = new RegionServerCallable(connection, getName(), row) { + @Override public Boolean call(int callTimeout) throws IOException { PayloadCarryingRpcController controller = new PayloadCarryingRpcController(); controller.setPriority(tableName); @@ -1284,6 +1333,7 @@ public class HTable implements HTableInterface { throws IOException { RegionServerCallable callable = new RegionServerCallable(connection, getName(), row) { + @Override public Boolean call(int callTimeout) throws IOException { PayloadCarryingRpcController controller = new PayloadCarryingRpcController(); controller.setPriority(tableName); @@ -1471,6 +1521,7 @@ public class HTable implements HTableInterface 
{ * @param writeBufferSize The new write buffer size, in bytes. * @throws IOException if a remote or network exception occurs. */ + @Override public void setWriteBufferSize(long writeBufferSize) throws IOException { this.writeBufferSize = writeBufferSize; if(currentWriteBufferSize > writeBufferSize) { @@ -1592,6 +1643,7 @@ public class HTable implements HTableInterface { /** * {@inheritDoc} */ + @Override public CoprocessorRpcChannel coprocessorService(byte[] row) { return new RegionCoprocessorRpcChannel(connection, tableName, row); } @@ -1606,6 +1658,7 @@ public class HTable implements HTableInterface { final Map results = Collections.synchronizedMap( new TreeMap(Bytes.BYTES_COMPARATOR)); coprocessorService(service, startKey, endKey, callable, new Batch.Callback() { + @Override public void update(byte[] region, byte[] row, R value) { if (region != null) { results.put(region, value); @@ -1633,6 +1686,7 @@ public class HTable implements HTableInterface { new RegionCoprocessorRpcChannel(connection, tableName, r); Future future = pool.submit( new Callable() { + @Override public R call() throws Exception { T instance = ProtobufUtil.newServiceStub(service, channel); R result = callable.call(instance); diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableInterface.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableInterface.java index 44be373..91bf7a7 100644 --- hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableInterface.java +++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableInterface.java @@ -33,7 +33,6 @@ import org.apache.hadoop.hbase.client.coprocessor.Batch; import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp; import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel; -import java.io.Closeable; import java.io.IOException; import java.util.List; import java.util.Map; @@ -43,21 +42,25 @@ import java.util.Map; * Obtain an instance from an {@link HConnection}. 
* * @since 0.21.0 + * @deprecated use {@link Table} instead */ @InterfaceAudience.Public @InterfaceStability.Stable -public interface HTableInterface extends Closeable { +@Deprecated +public interface HTableInterface extends Table { /** * Gets the name of this table. * * @return the table name. */ + @Override byte[] getTableName(); /** * Gets the fully qualified table name instance of this table. */ + @Override TableName getName(); /** @@ -66,12 +69,14 @@ public interface HTableInterface extends Closeable { * The reference returned is not a copy, so any change made to it will * affect this instance. */ + @Override Configuration getConfiguration(); /** * Gets the {@link HTableDescriptor table descriptor} for this table. * @throws IOException if a remote or network exception occurs. */ + @Override HTableDescriptor getTableDescriptor() throws IOException; /** @@ -88,6 +93,7 @@ public interface HTableInterface extends Closeable { * @return true if the specified Get matches one or more keys, false if not * @throws IOException e */ + @Override boolean exists(Get get) throws IOException; /** @@ -105,6 +111,7 @@ public interface HTableInterface extends Closeable { * @return Array of Boolean true if the specified Get matches one or more keys, false if not * @throws IOException e */ + @Override Boolean[] exists(List gets) throws IOException; /** @@ -120,6 +127,7 @@ public interface HTableInterface extends Closeable { * @throws IOException * @since 0.90.0 */ + @Override void batch(final List actions, final Object[] results) throws IOException, InterruptedException; /** @@ -134,12 +142,14 @@ public interface HTableInterface extends Closeable { * @deprecated If any exception is thrown by one of the actions, there is no way to * retrieve the partially executed results. Use {@link #batch(List, Object[])} instead. */ + @Deprecated Object[] batch(final List actions) throws IOException, InterruptedException; /** * Same as {@link #batch(List, Object[])}, but with a callback. 
* @since 0.96.0 */ + @Override void batchCallback( final List actions, final Object[] results, final Batch.Callback callback ) @@ -154,6 +164,7 @@ public interface HTableInterface extends Closeable { * {@link #batchCallback(List, Object[], org.apache.hadoop.hbase.client.coprocessor.Batch.Callback)} * instead. */ + @Deprecated Object[] batchCallback( List actions, Batch.Callback callback ) throws IOException, @@ -168,6 +179,7 @@ public interface HTableInterface extends Closeable { * @throws IOException if a remote or network exception occurs. * @since 0.20.0 */ + @Override Result get(Get get) throws IOException; /** @@ -184,6 +196,7 @@ public interface HTableInterface extends Closeable { * * @since 0.90.0 */ + @Override Result[] get(List gets) throws IOException; /** @@ -194,13 +207,14 @@ public interface HTableInterface extends Closeable { * @param family Column family to include in the {@link Result}. * @throws IOException if a remote or network exception occurs. * @since 0.20.0 - * + * * @deprecated As of version 0.92 this method is deprecated without * replacement. Since version 0.96+, you can use reversed scan. * getRowOrBefore is used internally to find entries in hbase:meta and makes * various assumptions about the table (which are true for hbase:meta but not * in general) to be efficient. */ + @Deprecated Result getRowOrBefore(byte[] row, byte[] family) throws IOException; /** @@ -214,6 +228,7 @@ public interface HTableInterface extends Closeable { * @throws IOException if a remote or network exception occurs. * @since 0.20.0 */ + @Override ResultScanner getScanner(Scan scan) throws IOException; /** @@ -247,6 +262,7 @@ public interface HTableInterface extends Closeable { * @throws IOException if a remote or network exception occurs. * @since 0.20.0 */ + @Override void put(Put put) throws IOException; /** @@ -265,6 +281,7 @@ public interface HTableInterface extends Closeable { * @throws IOException if a remote or network exception occurs. 
* @since 0.20.0 */ + @Override void put(List puts) throws IOException; /** @@ -280,9 +297,11 @@ public interface HTableInterface extends Closeable { * @throws IOException e * @return true if the new put was executed, false otherwise */ + @Override boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier, byte[] value, Put put) throws IOException; + @Override boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier, CompareOp compareOp, byte[] value, Put put) throws IOException; /** @@ -292,6 +311,7 @@ public interface HTableInterface extends Closeable { * @throws IOException if a remote or network exception occurs. * @since 0.20.0 */ + @Override void delete(Delete delete) throws IOException; /** @@ -305,6 +325,7 @@ public interface HTableInterface extends Closeable { * that have not be successfully applied. * @since 0.20.1 */ + @Override void delete(List deletes) throws IOException; /** @@ -320,9 +341,11 @@ public interface HTableInterface extends Closeable { * @throws IOException e * @return true if the new delete was executed, false otherwise */ + @Override boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier, byte[] value, Delete delete) throws IOException; + @Override boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier, CompareOp compareOp, byte[] value, Delete delete) throws IOException; @@ -333,6 +356,7 @@ public interface HTableInterface extends Closeable { * @param rm object that specifies the set of mutations to perform atomically * @throws IOException */ + @Override void mutateRow(final RowMutations rm) throws IOException; /** @@ -348,6 +372,7 @@ public interface HTableInterface extends Closeable { * @throws IOException e * @return values of columns after the append operation (maybe null) */ + @Override Result append(final Append append) throws IOException; /** @@ -363,6 +388,7 @@ public interface HTableInterface extends Closeable { * @throws IOException e * @return values of columns after the increment */ + 
@Override Result increment(final Increment increment) throws IOException; /** @@ -377,6 +403,7 @@ public interface HTableInterface extends Closeable { * @return The new value, post increment. * @throws IOException if a remote or network exception occurs. */ + @Override long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier, long amount) throws IOException; @@ -397,6 +424,7 @@ public interface HTableInterface extends Closeable { * @return The new value, post increment. * @throws IOException if a remote or network exception occurs. */ + @Override long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier, long amount, Durability durability) throws IOException; @@ -415,6 +443,7 @@ public interface HTableInterface extends Closeable { * {@link Put} operations don't get buffered/delayed and are immediately * executed. */ + @Override boolean isAutoFlush(); /** @@ -425,6 +454,7 @@ public interface HTableInterface extends Closeable { * {@link #isAutoFlush} is {@code true}. * @throws IOException if a remote or network exception occurs. */ + @Override void flushCommits() throws IOException; /** @@ -432,6 +462,7 @@ public interface HTableInterface extends Closeable { * * @throws IOException if a remote or network exception occurs. 
*/ + @Override void close() throws IOException; /** @@ -461,7 +492,8 @@ public interface HTableInterface extends Closeable { * @param row The row key used to identify the remote region location * @return A CoprocessorRpcChannel instance */ - @InterfaceAudience.Private // TODO add coproc audience level + @Override + @InterfaceAudience.Private // TODO add coproc audience level CoprocessorRpcChannel coprocessorService(byte[] row); /** @@ -485,6 +517,7 @@ public interface HTableInterface extends Closeable { * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method * @return a map of result values keyed by region name */ + @Override @InterfaceAudience.Private // TODO add coproc audience level Map coprocessorService(final Class service, byte[] startKey, byte[] endKey, final Batch.Call callable) @@ -517,6 +550,7 @@ public interface HTableInterface extends Closeable { * @param Return type for the {@code callable} parameter's * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method */ + @Override @InterfaceAudience.Private // TODO add coproc audience level void coprocessorService(final Class service, byte[] startKey, byte[] endKey, final Batch.Call callable, @@ -568,11 +602,13 @@ public interface HTableInterface extends Closeable { * Setting clearBufferOnFail to false is deprecated since 0.96. * @see #flushCommits */ + @Override void setAutoFlush(boolean autoFlush, boolean clearBufferOnFail); /** * Set the autoFlush behavior, without changing the value of {@code clearBufferOnFail} */ + @Override void setAutoFlushTo(boolean autoFlush); /** @@ -582,6 +618,7 @@ public interface HTableInterface extends Closeable { * {@code hbase.client.write.buffer}. * @return The size of the write buffer in bytes. */ + @Override long getWriteBufferSize(); /** @@ -592,6 +629,7 @@ public interface HTableInterface extends Closeable { * @param writeBufferSize The new write buffer size, in bytes. * @throws IOException if a remote or network exception occurs. 
*/ + @Override void setWriteBufferSize(long writeBufferSize) throws IOException; /** @@ -599,7 +637,7 @@ public interface HTableInterface extends Closeable { * region spanning the range from the {@code startKey} row to {@code endKey} row (inclusive), all * the invocations to the same region server will be batched into one call. The coprocessor * service is invoked according to the service instance, method name and parameters. - * + * * @param methodDescriptor * the descriptor for the protobuf service method to call. * @param request @@ -618,6 +656,7 @@ public interface HTableInterface extends Closeable { * @throws Throwable * @return a map of result values keyed by region name */ + @Override @InterfaceAudience.Private Map batchCoprocessorService( Descriptors.MethodDescriptor methodDescriptor, Message request, @@ -628,13 +667,13 @@ public interface HTableInterface extends Closeable { * region spanning the range from the {@code startKey} row to {@code endKey} row (inclusive), all * the invocations to the same region server will be batched into one call. The coprocessor * service is invoked according to the service instance, method name and parameters. - * + * *

* The given * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Callback#update(byte[],byte[],Object)} * method will be called with the return value from each region's invocation. *

- * + * * @param methodDescriptor * the descriptor for the protobuf service method to call. * @param request @@ -654,6 +693,7 @@ public interface HTableInterface extends Closeable { * @throws ServiceException * @throws Throwable */ + @Override @InterfaceAudience.Private void batchCoprocessorService(Descriptors.MethodDescriptor methodDescriptor, Message request, byte[] startKey, byte[] endKey, R responsePrototype, diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/ResultScanner.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/ResultScanner.java index aad8403..95c0ccf 100644 --- hbase-client/src/main/java/org/apache/hadoop/hbase/client/ResultScanner.java +++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/ResultScanner.java @@ -26,7 +26,7 @@ import java.io.IOException; /** * Interface for client-side scanning. - * Go to {@link HTable} to obtain instances. + * Go to {@link Table} to obtain instances. */ @InterfaceAudience.Public @InterfaceStability.Stable @@ -50,5 +50,6 @@ public interface ResultScanner extends Closeable, Iterable { /** * Closes the scanner and releases any resources it has allocated */ + @Override void close(); } diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/Table.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/Table.java new file mode 100644 index 0000000..438e679 --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/Table.java @@ -0,0 +1,617 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.client; + +import java.io.Closeable; +import java.io.IOException; +import java.util.List; +import java.util.Map; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.coprocessor.Batch; +import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp; +import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel; + +import com.google.protobuf.Descriptors; +import com.google.protobuf.Message; +import com.google.protobuf.Service; +import com.google.protobuf.ServiceException; + +/** + * A Table object to access a single HBase table. Implementations of + * this interface can be obtained via {@link Connection}. The caller is responsible for + * {@link #close()}'ing the table. + * + *

This class is NOT thread safe for reads and writes. However instances obtained + * from the Connection interface are light-weight. It is recommended to not share, cache + * or pool Table objects, but each thread obtain its own Table instance. + * + *

This class replaces {@link HTable} as the default way to access table data. Users should not + * construct HTable objects, but obtain a {@link Table} instance from the {@link Connection}. + * + *

Implementation note: This interface can be considered stable for clients using it. However, + * it is not intended to be extended/implemented by clients. New methods may be introduced in this + * interface in major or minor releases. + *

+ * @see Connection + * @see ConnectionFactory + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public interface Table extends Closeable { + + /** + * Gets the name of this table. + * + * @return the table name. + */ + byte[] getTableName(); + + /** + * Gets the fully qualified table name instance of this table. + */ + TableName getName(); + + /** + * Returns the {@link Configuration} object used by this instance. + *

+ * The reference returned is not a copy, so any change made to it will + * affect this instance. + */ + Configuration getConfiguration(); + + /** + * Gets the {@link HTableDescriptor table descriptor} for this table. + * @throws IOException if a remote or network exception occurs. + */ + HTableDescriptor getTableDescriptor() throws IOException; + + /** + * Test for the existence of columns in the table, as specified by the Get. + *

+ * + * This will return true if the Get matches one or more keys, false if not. + *

+ * + * This is a server-side call so it prevents any data from being transfered to + * the client. + * + * @param get the Get + * @return true if the specified Get matches one or more keys, false if not + * @throws IOException e + */ + boolean exists(Get get) throws IOException; + + /** + * Test for the existence of columns in the table, as specified by the Gets. + *

+ * + * This will return an array of booleans. Each value will be true if the related Get matches + * one or more keys, false if not. + *

+ * + * This is a server-side call so it prevents any data from being transfered to + * the client. + * + * @param gets the Gets + * @return Array of booleans, true if the specified Get matches one or more keys, false if not + * @throws IOException e + */ + //TODO: This sucks. This should return boolean[] instead of Boolean[]. Should we add a new method? + Boolean[] exists(List gets) throws IOException; + + /** + * Method that does a batch call on Deletes, Gets, Puts, Increments, Appends and RowMutations. + * The ordering of execution of the actions is not defined. Meaning if you do a Put and a + * Get in the same {@link #batch} call, you will not necessarily be + * guaranteed that the Get returns what the Put had put. + * + * @param actions list of Get, Put, Delete, Increment, Append, RowMutations objects + * @param results Empty Object[], same size as actions. Provides access to partial + * results, in case an exception is thrown. A null in the result array means that + * the call for that action failed, even after retries + * @throws IOException + * @since 0.90.0 + */ + void batch(final List actions, final Object[] results) throws IOException, InterruptedException; + + /** + * Same as {@link #batch(List, Object[])}, but with a callback. + * @since 0.96.0 + */ + void batchCallback( + final List actions, final Object[] results, final Batch.Callback callback + ) + throws IOException, InterruptedException; + + /** + * Extracts certain cells from a given row. + * @param get The object that specifies what data to fetch and from which row. + * @return The data coming from the specified row, if it exists. If the row + * specified doesn't exist, the {@link Result} instance returned won't + * contain any {@link KeyValue}, as indicated by {@link Result#isEmpty()}. + * @throws IOException if a remote or network exception occurs. + * @since 0.20.0 + */ + Result get(Get get) throws IOException; + + /** + * Extracts certain cells from the given rows, in batch. 
+ * + * @param gets The objects that specify what data to fetch and from which rows. + * + * @return The data coming from the specified rows, if it exists. If the row + * specified doesn't exist, the {@link Result} instance returned won't + * contain any {@link KeyValue}, as indicated by {@link Result#isEmpty()}. + * If there are any failures even after retries, there will be a null in + * the results array for those Gets, AND an exception will be thrown. + * @throws IOException if a remote or network exception occurs. + * + * @since 0.90.0 + */ + Result[] get(List gets) throws IOException; + + /** + * Returns a scanner on the current table as specified by the {@link Scan} + * object. + * Note that the passed {@link Scan}'s start row and caching properties + * maybe changed. + * + * @param scan A configured {@link Scan} object. + * @return A scanner. + * @throws IOException if a remote or network exception occurs. + * @since 0.20.0 + */ + ResultScanner getScanner(Scan scan) throws IOException; + + /** + * Puts some data in the table. + *

+ * If {@link #isAutoFlush isAutoFlush} is false, the update is buffered + * until the internal buffer is full. + * @param put The data to put. + * @throws IOException if a remote or network exception occurs. + * @since 0.20.0 + */ + void put(Put put) throws IOException; + + /** + * Puts some data in the table, in batch. + *

+ * If {@link #isAutoFlush isAutoFlush} is false, the update is buffered + * until the internal buffer is full. + *

+ * This can be used for group commit, or for submitting user defined + * batches. The writeBuffer will be periodically inspected while the List + * is processed, so depending on the List size the writeBuffer may flush + * not at all, or more than once. + * @param puts The list of mutations to apply. The batch put is done by + * aggregating the iteration of the Puts over the write buffer + * at the client-side for a single RPC call. + * @throws IOException if a remote or network exception occurs. + * @since 0.20.0 + */ + void put(List puts) throws IOException; + + /** + * Atomically checks if a row/family/qualifier value matches the expected + * value. If it does, it adds the put. If the passed value is null, the check + * is for the lack of column (ie: non-existance) + * + * @param row to check + * @param family column family to check + * @param qualifier column qualifier to check + * @param value the expected value + * @param put data to put if check succeeds + * @throws IOException e + * @return true if the new put was executed, false otherwise + */ + boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier, + byte[] value, Put put) throws IOException; + + /** + * Atomically checks if a row/family/qualifier value matches the expected + * value. If it does, it adds the put. If the passed value is null, the check + * is for the lack of column (ie: non-existance) + * + * @param row to check + * @param family column family to check + * @param qualifier column qualifier to check + * @param compareOp a comparison operator + * @param value the expected value + * @param put data to put if check succeeds + * @throws IOException e + * @return true if the new put was executed, false otherwise + */ + boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier, + CompareOp compareOp, byte[] value, Put put) throws IOException; + + /** + * Deletes the specified cells/row. + * + * @param delete The object that specifies what to delete. 
+ * @throws IOException if a remote or network exception occurs. + * @since 0.20.0 + */ + void delete(Delete delete) throws IOException; + + /** + * Deletes the specified cells/rows in bulk. + * @param deletes List of things to delete. List gets modified by this + * method (in particular it gets re-ordered, so the order in which the elements + * are inserted in the list gives no guarantee as to the order in which the + * {@link Delete}s are executed). + * @throws IOException if a remote or network exception occurs. In that case + * the {@code deletes} argument will contain the {@link Delete} instances + * that have not be successfully applied. + * @since 0.20.1 + */ + void delete(List deletes) throws IOException; + + /** + * Atomically checks if a row/family/qualifier value matches the expected + * value. If it does, it adds the delete. If the passed value is null, the + * check is for the lack of column (ie: non-existance) + * + * @param row to check + * @param family column family to check + * @param qualifier column qualifier to check + * @param value the expected value + * @param delete data to delete if check succeeds + * @throws IOException e + * @return true if the new delete was executed, false otherwise + */ + boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier, + byte[] value, Delete delete) throws IOException; + + /** + * Atomically checks if a row/family/qualifier value matches the expected + * value. If it does, it adds the delete. 
If the passed value is null, the + * check is for the lack of column (ie: non-existance) + * + * @param row to check + * @param family column family to check + * @param qualifier column qualifier to check + * @param compareOp a comparison operator + * @param value the expected value + * @param delete data to delete if check succeeds + * @throws IOException e + * @return true if the new delete was executed, false otherwise + */ + boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier, + CompareOp compareOp, byte[] value, Delete delete) throws IOException; + + /** + * Performs multiple mutations atomically on a single row. Currently + * {@link Put} and {@link Delete} are supported. + * + * @param rm object that specifies the set of mutations to perform atomically + * @throws IOException + */ + void mutateRow(final RowMutations rm) throws IOException; + + /** + * Appends values to one or more columns within a single row. + *

+ * This operation does not appear atomic to readers. Appends are done + * under a single row lock, so write operations to a row are synchronized, but + * readers do not take row locks so get and scan operations can see this + * operation partially completed. + * + * @param append object that specifies the columns and amounts to be used + * for the increment operations + * @throws IOException e + * @return values of columns after the append operation (maybe null) + */ + Result append(final Append append) throws IOException; + + /** + * Increments one or more columns within a single row. + *

+ * This operation does not appear atomic to readers. Increments are done + * under a single row lock, so write operations to a row are synchronized, but + * readers do not take row locks so get and scan operations can see this + * operation partially completed. + * + * @param increment object that specifies the columns and amounts to be used + * for the increment operations + * @throws IOException e + * @return values of columns after the increment + */ + Result increment(final Increment increment) throws IOException; + + /** + * See {@link #incrementColumnValue(byte[], byte[], byte[], long, Durability)} + *

+ * The {@link Durability} is defaulted to {@link Durability#SYNC_WAL}. + * @param row The row that contains the cell to increment. + * @param family The column family of the cell to increment. + * @param qualifier The column qualifier of the cell to increment. + * @param amount The amount to increment the cell with (or decrement, if the + * amount is negative). + * @return The new value, post increment. + * @throws IOException if a remote or network exception occurs. + */ + long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier, + long amount) throws IOException; + + /** + * Atomically increments a column value. If the column value already exists + * and is not a big-endian long, this could throw an exception. If the column + * value does not yet exist it is initialized to amount and + * written to the specified column. + * + *

Setting durability to {@link Durability#SKIP_WAL} means that in a fail + * scenario you will lose any increments that have not been flushed. + * @param row The row that contains the cell to increment. + * @param family The column family of the cell to increment. + * @param qualifier The column qualifier of the cell to increment. + * @param amount The amount to increment the cell with (or decrement, if the + * amount is negative). + * @param durability The persistence guarantee for this increment. + * @return The new value, post increment. + * @throws IOException if a remote or network exception occurs. + */ + long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier, + long amount, Durability durability) throws IOException; + + /** + * Tells whether or not 'auto-flush' is turned on. + * + * @return {@code true} if 'auto-flush' is enabled (default), meaning + * {@link Put} operations don't get buffered/delayed and are immediately + * executed. + */ + boolean isAutoFlush(); + + /** + * Executes all the buffered {@link Put} operations. + *

+ * This method gets called once automatically for every {@link Put} or batch + * of {@link Put}s (when put(List) is used) when + * {@link #isAutoFlush} is {@code true}. + * @throws IOException if a remote or network exception occurs. + */ + void flushCommits() throws IOException; + + /** + * Releases any resources held or pending changes in internal buffers. + * + * @throws IOException if a remote or network exception occurs. + */ + @Override + void close() throws IOException; + + /** + * Creates and returns a {@link com.google.protobuf.RpcChannel} instance connected to the + * table region containing the specified row. The row given does not actually have + * to exist. Whichever region would contain the row based on start and end keys will + * be used. Note that the {@code row} parameter is also not passed to the + * coprocessor handler registered for this protocol, unless the {@code row} + * is separately passed as an argument in the service request. The parameter + * here is only used to locate the region used to handle the call. + * + *

+ * The obtained {@link com.google.protobuf.RpcChannel} instance can be used to access a published + * coprocessor {@link com.google.protobuf.Service} using standard protobuf service invocations: + *

+ * + *
+ *
+   * CoprocessorRpcChannel channel = myTable.coprocessorService(rowkey);
+   * MyService.BlockingInterface service = MyService.newBlockingStub(channel);
+   * MyCallRequest request = MyCallRequest.newBuilder()
+   *     ...
+   *     .build();
+   * MyCallResponse response = service.myCall(null, request);
+   * 
+ * + * @param row The row key used to identify the remote region location + * @return A CoprocessorRpcChannel instance + */ + @InterfaceAudience.Private // TODO add coproc audience level + CoprocessorRpcChannel coprocessorService(byte[] row); + + /** + * Creates an instance of the given {@link com.google.protobuf.Service} subclass for each table + * region spanning the range from the {@code startKey} row to {@code endKey} row (inclusive), + * and invokes the passed {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} + * method with each {@link Service} + * instance. + * + * @param service the protocol buffer {@code Service} implementation to call + * @param startKey start region selection with region containing this row. If {@code null}, the + * selection will start with the first table region. + * @param endKey select regions up to and including the region containing this row. + * If {@code null}, selection will continue through the last table region. + * @param callable this instance's + * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} + * method will be invoked once per table region, using the {@link Service} + * instance connected to that region. 
+ * @param the {@link Service} subclass to connect to + * @param Return type for the {@code callable} parameter's + * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method + * @return a map of result values keyed by region name + */ + @InterfaceAudience.Private // TODO add coproc audience level + Map coprocessorService(final Class service, + byte[] startKey, byte[] endKey, final Batch.Call callable) + throws ServiceException, Throwable; + + /** + * Creates an instance of the given {@link com.google.protobuf.Service} subclass for each table + * region spanning the range from the {@code startKey} row to {@code endKey} row (inclusive), + * and invokes the passed {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} + * method with each {@link Service} instance. + * + *

+ * The given + * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Callback#update(byte[], byte[], Object)} + * method will be called with the return value from each region's + * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} invocation. + *

+ * + * @param service the protocol buffer {@code Service} implementation to call + * @param startKey start region selection with region containing this row. If {@code null}, the + * selection will start with the first table region. + * @param endKey select regions up to and including the region containing this row. + * If {@code null}, selection will continue through the last table region. + * @param callable this instance's + * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method + * will be invoked once per table region, using the {@link Service} instance + * connected to that region. + * @param callback + * @param the {@link Service} subclass to connect to + * @param Return type for the {@code callable} parameter's + * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method + */ + @InterfaceAudience.Private // TODO add coproc audience level + void coprocessorService(final Class service, + byte[] startKey, byte[] endKey, final Batch.Call callable, + final Batch.Callback callback) throws ServiceException, Throwable; + + /** + * Turns 'auto-flush' on or off. + *

+ * When enabled (default), {@link Put} operations don't get buffered/delayed + * and are immediately executed. Failed operations are not retried. This is + * slower but safer. + *

+ * Turning off {@code #autoFlush} means that multiple {@link Put}s will be + * accepted before any RPC is actually sent to do the write operations. If the + * application dies before pending writes get flushed to HBase, data will be + * lost. + *

+ * When you turn {@code #autoFlush} off, you should also consider the + * {@code #clearBufferOnFail} option. By default, asynchronous {@link Put} + * requests will be retried on failure until successful. However, this can + * pollute the writeBuffer and slow down batching performance. Additionally, + * you may want to issue a number of Put requests and call + * {@link #flushCommits()} as a barrier. In both use cases, consider setting + * clearBufferOnFail to true to erase the buffer after {@link #flushCommits()} + * has been called, regardless of success. + *

+ * In other words, if you call {@code #setAutoFlush(false)}; HBase will retry N time for each + * flushCommit, including the last one when closing the table. This is NOT recommended, + * most of the time you want to call {@code #setAutoFlush(false, true)}. + * + * @param autoFlush + * Whether or not to enable 'auto-flush'. + * @param clearBufferOnFail + * Whether to keep Put failures in the writeBuffer. If autoFlush is true, then + * the value of this parameter is ignored and clearBufferOnFail is set to true. + * Setting clearBufferOnFail to false is deprecated since 0.96. + * @see #flushCommits + */ + void setAutoFlush(boolean autoFlush, boolean clearBufferOnFail); + + /** + * Set the autoFlush behavior, without changing the value of {@code clearBufferOnFail} + */ + void setAutoFlushTo(boolean autoFlush); + + /** + * Returns the maximum size in bytes of the write buffer for this Table. + *

+ * The default value comes from the configuration parameter + * {@code hbase.client.write.buffer}. + * @return The size of the write buffer in bytes. + */ + long getWriteBufferSize(); + + /** + * Sets the size of the buffer in bytes. + *

+ * If the new size is less than the current amount of data in the + * write buffer, the buffer gets flushed. + * @param writeBufferSize The new write buffer size, in bytes. + * @throws IOException if a remote or network exception occurs. + */ + void setWriteBufferSize(long writeBufferSize) throws IOException; + + /** + * Creates an instance of the given {@link com.google.protobuf.Service} subclass for each table + * region spanning the range from the {@code startKey} row to {@code endKey} row (inclusive), all + * the invocations to the same region server will be batched into one call. The coprocessor + * service is invoked according to the service instance, method name and parameters. + * + * @param methodDescriptor + * the descriptor for the protobuf service method to call. + * @param request + * the method call parameters + * @param startKey + * start region selection with region containing this row. If {@code null}, the + * selection will start with the first table region. + * @param endKey + * select regions up to and including the region containing this row. If {@code null}, + * selection will continue through the last table region. + * @param responsePrototype + * the proto type of the response of the method in Service. + * @param + * the response type for the coprocessor Service method + * @throws ServiceException + * @throws Throwable + * @return a map of result values keyed by region name + */ + @InterfaceAudience.Private + Map batchCoprocessorService( + Descriptors.MethodDescriptor methodDescriptor, Message request, + byte[] startKey, byte[] endKey, R responsePrototype) throws ServiceException, Throwable; + + /** + * Creates an instance of the given {@link com.google.protobuf.Service} subclass for each table + * region spanning the range from the {@code startKey} row to {@code endKey} row (inclusive), all + * the invocations to the same region server will be batched into one call. 
The coprocessor + * service is invoked according to the service instance, method name and parameters. + * + *

+ * The given + * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Callback#update(byte[],byte[],Object)} + * method will be called with the return value from each region's invocation. + *

+ * + * @param methodDescriptor + * the descriptor for the protobuf service method to call. + * @param request + * the method call parameters + * @param startKey + * start region selection with region containing this row. If {@code null}, the + * selection will start with the first table region. + * @param endKey + * select regions up to and including the region containing this row. If {@code null}, + * selection will continue through the last table region. + * @param responsePrototype + * the proto type of the response of the method in Service. + * @param callback + * callback to invoke with the response for each region + * @param + * the response type for the coprocessor Service method + * @throws ServiceException + * @throws Throwable + */ + @InterfaceAudience.Private + void batchCoprocessorService(Descriptors.MethodDescriptor methodDescriptor, + Message request, byte[] startKey, byte[] endKey, R responsePrototype, + Batch.Callback callback) throws ServiceException, Throwable; +} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/client/CoprocessorHConnection.java hbase-server/src/main/java/org/apache/hadoop/hbase/client/CoprocessorHConnection.java index 85c86f9..d16e3fc 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/client/CoprocessorHConnection.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/client/CoprocessorHConnection.java @@ -85,6 +85,7 @@ public class CoprocessorHConnection implements ClusterConnection { this.delegate = delegate; } + @Override public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ClientService.BlockingInterface getClient(ServerName serverName) throws IOException { // client is trying to reach off-server, so we can't do anything special @@ -96,262 +97,329 @@ public class CoprocessorHConnection implements ClusterConnection { return server.getRSRpcServices(); } + @Override public void abort(String why, Throwable e) { delegate.abort(why, e); } + @Override public boolean isAborted() { return 
delegate.isAborted(); } + @Override public Configuration getConfiguration() { return delegate.getConfiguration(); } + @Override public HTableInterface getTable(String tableName) throws IOException { return delegate.getTable(tableName); } + @Override public HTableInterface getTable(byte[] tableName) throws IOException { return delegate.getTable(tableName); } + @Override public HTableInterface getTable(TableName tableName) throws IOException { return delegate.getTable(tableName); } + @Override public HTableInterface getTable(String tableName, ExecutorService pool) throws IOException { return delegate.getTable(tableName, pool); } + @Override public HTableInterface getTable(byte[] tableName, ExecutorService pool) throws IOException { return delegate.getTable(tableName, pool); } + @Override public HTableInterface getTable(TableName tableName, ExecutorService pool) throws IOException { return delegate.getTable(tableName, pool); } + @Override + public Table getTable(TableName tableName, Configuration conf, ExecutorService pool) + throws IOException { + return delegate.getTable(tableName, conf, pool); + } + + @Override public Admin getAdmin() throws IOException { return delegate.getAdmin(); } + @Override public boolean isMasterRunning() throws MasterNotRunningException, ZooKeeperConnectionException { return delegate.isMasterRunning(); } + @Override public boolean isTableEnabled(TableName tableName) throws IOException { return delegate.isTableEnabled(tableName); } + @Override public boolean isTableEnabled(byte[] tableName) throws IOException { return delegate.isTableEnabled(tableName); } + @Override public boolean isTableDisabled(TableName tableName) throws IOException { return delegate.isTableDisabled(tableName); } + @Override public boolean isTableDisabled(byte[] tableName) throws IOException { return delegate.isTableDisabled(tableName); } + @Override public boolean isTableAvailable(TableName tableName) throws IOException { return delegate.isTableAvailable(tableName); } + 
@Override public boolean isTableAvailable(byte[] tableName) throws IOException { return delegate.isTableAvailable(tableName); } + @Override public boolean isTableAvailable(TableName tableName, byte[][] splitKeys) throws IOException { return delegate.isTableAvailable(tableName, splitKeys); } + @Override public boolean isTableAvailable(byte[] tableName, byte[][] splitKeys) throws IOException { return delegate.isTableAvailable(tableName, splitKeys); } + @Override public HTableDescriptor[] listTables() throws IOException { return delegate.listTables(); } + @Override public String[] getTableNames() throws IOException { return delegate.getTableNames(); } + @Override public TableName[] listTableNames() throws IOException { return delegate.listTableNames(); } + @Override public HTableDescriptor getHTableDescriptor(TableName tableName) throws IOException { return delegate.getHTableDescriptor(tableName); } + @Override public HTableDescriptor getHTableDescriptor(byte[] tableName) throws IOException { return delegate.getHTableDescriptor(tableName); } + @Override public HRegionLocation locateRegion(TableName tableName, byte[] row) throws IOException { return delegate.locateRegion(tableName, row); } + @Override public HRegionLocation locateRegion(byte[] tableName, byte[] row) throws IOException { return delegate.locateRegion(tableName, row); } + @Override public void clearRegionCache() { delegate.clearRegionCache(); } + @Override public void clearRegionCache(TableName tableName) { delegate.clearRegionCache(tableName); } + @Override public void clearRegionCache(byte[] tableName) { delegate.clearRegionCache(tableName); } + @Override public HRegionLocation relocateRegion(TableName tableName, byte[] row) throws IOException { return delegate.relocateRegion(tableName, row); } + @Override public HRegionLocation relocateRegion(byte[] tableName, byte[] row) throws IOException { return delegate.relocateRegion(tableName, row); } + @Override public void updateCachedLocations(TableName 
tableName, byte[] rowkey, Object exception, ServerName source) { delegate.updateCachedLocations(tableName, rowkey, exception, source); } + @Override public void updateCachedLocations(TableName tableName, byte[] rowkey, Object exception, HRegionLocation source) { delegate.updateCachedLocations(tableName, rowkey, exception, source); } + @Override public void updateCachedLocations(byte[] tableName, byte[] rowkey, Object exception, HRegionLocation source) { delegate.updateCachedLocations(tableName, rowkey, exception, source); } + @Override public HRegionLocation locateRegion(byte[] regionName) throws IOException { return delegate.locateRegion(regionName); } + @Override public List locateRegions(TableName tableName) throws IOException { return delegate.locateRegions(tableName); } + @Override public List locateRegions(byte[] tableName) throws IOException { return delegate.locateRegions(tableName); } + @Override public List locateRegions(TableName tableName, boolean useCache, boolean offlined) throws IOException { return delegate.locateRegions(tableName, useCache, offlined); } + @Override public List locateRegions(byte[] tableName, boolean useCache, boolean offlined) throws IOException { return delegate.locateRegions(tableName, useCache, offlined); } + @Override public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MasterService.BlockingInterface getMaster() throws IOException { return delegate.getMaster(); } + @Override public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService.BlockingInterface getAdmin(ServerName serverName) throws IOException { return delegate.getAdmin(serverName); } + @Override public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService.BlockingInterface getAdmin(ServerName serverName, boolean getMaster) throws IOException { return delegate.getAdmin(serverName, getMaster); } + @Override public HRegionLocation getRegionLocation(TableName tableName, byte[] row, boolean reload) throws IOException { return 
delegate.getRegionLocation(tableName, row, reload); } + @Override public HRegionLocation getRegionLocation(byte[] tableName, byte[] row, boolean reload) throws IOException { return delegate.getRegionLocation(tableName, row, reload); } + @Override public void processBatch(List actions, TableName tableName, ExecutorService pool, Object[] results) throws IOException, InterruptedException { delegate.processBatch(actions, tableName, pool, results); } + @Override public void processBatch(List actions, byte[] tableName, ExecutorService pool, Object[] results) throws IOException, InterruptedException { delegate.processBatch(actions, tableName, pool, results); } + @Override public void processBatchCallback(List list, TableName tableName, ExecutorService pool, Object[] results, Callback callback) throws IOException, InterruptedException { delegate.processBatchCallback(list, tableName, pool, results, callback); } + @Override public void processBatchCallback(List list, byte[] tableName, ExecutorService pool, Object[] results, Callback callback) throws IOException, InterruptedException { delegate.processBatchCallback(list, tableName, pool, results, callback); } + @Override public void setRegionCachePrefetch(TableName tableName, boolean enable) { delegate.setRegionCachePrefetch(tableName, enable); } + @Override public void setRegionCachePrefetch(byte[] tableName, boolean enable) { delegate.setRegionCachePrefetch(tableName, enable); } + @Override public boolean getRegionCachePrefetch(TableName tableName) { return delegate.getRegionCachePrefetch(tableName); } + @Override public boolean getRegionCachePrefetch(byte[] tableName) { return delegate.getRegionCachePrefetch(tableName); } + @Override public int getCurrentNrHRS() throws IOException { return delegate.getCurrentNrHRS(); } + @Override public HTableDescriptor[] getHTableDescriptorsByTableName(List tableNames) throws IOException { return delegate.getHTableDescriptorsByTableName(tableNames); } + @Override public 
HTableDescriptor[] getHTableDescriptors(List tableNames) throws IOException { return delegate.getHTableDescriptors(tableNames); } + @Override public boolean isClosed() { return delegate.isClosed(); } + @Override public void clearCaches(ServerName sn) { delegate.clearCaches(sn); } + @Override public void close() throws IOException { delegate.close(); } + @Override public void deleteCachedRegionLocation(HRegionLocation location) { delegate.deleteCachedRegionLocation(location); } + @Override public MasterKeepAliveConnection getKeepAliveMasterService() throws MasterNotRunningException { return delegate.getKeepAliveMasterService(); } + @Override public boolean isDeadServer(ServerName serverName) { return delegate.isDeadServer(serverName); } diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java index 12c86bf..c310b77 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java @@ -65,6 +65,7 @@ import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.ResultScanner; import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.fs.HFileSystem; import org.apache.hadoop.hbase.io.compress.Compression; import org.apache.hadoop.hbase.io.compress.Compression.Algorithm; @@ -1630,7 +1631,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { * @return Count of rows loaded. * @throws IOException */ - public int loadTable(final HTable t, final byte[] f) throws IOException { + public int loadTable(final Table t, final byte[] f) throws IOException { return loadTable(t, new byte[][] {f}); } @@ -1641,7 +1642,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { * @return Count of rows loaded. 
* @throws IOException */ - public int loadTable(final HTable t, final byte[] f, boolean writeToWAL) throws IOException { + public int loadTable(final Table t, final byte[] f, boolean writeToWAL) throws IOException { return loadTable(t, new byte[][] {f}, null, writeToWAL); } @@ -1652,7 +1653,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { * @return Count of rows loaded. * @throws IOException */ - public int loadTable(final HTable t, final byte[][] f) throws IOException { + public int loadTable(final Table t, final byte[][] f) throws IOException { return loadTable(t, f, null); } @@ -1664,7 +1665,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { * @return Count of rows loaded. * @throws IOException */ - public int loadTable(final HTable t, final byte[][] f, byte[] value) throws IOException { + public int loadTable(final Table t, final byte[][] f, byte[] value) throws IOException { return loadTable(t, f, value, true); } @@ -1676,8 +1677,8 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { * @return Count of rows loaded. 
* @throws IOException */ - public int loadTable(final HTable t, final byte[][] f, byte[] value, boolean writeToWAL) throws IOException { - t.setAutoFlush(false); + public int loadTable(final Table t, final byte[][] f, byte[] value, boolean writeToWAL) throws IOException { + t.setAutoFlushTo(false); int rowCount = 0; for (byte[] row : HBaseTestingUtility.ROWS) { Put put = new Put(row); diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHCM.java hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHCM.java index 27687b3..2ec0d4d 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHCM.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHCM.java @@ -345,6 +345,7 @@ public class TestHCM { final AtomicReference failed = new AtomicReference(null); Thread t = new Thread("testConnectionCloseThread") { + @Override public void run() { int done = 0; try {