diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/CoprocessorEnvironment.java hbase-client/src/main/java/org/apache/hadoop/hbase/CoprocessorEnvironment.java
index 37f1a33..362439a 100644
--- hbase-client/src/main/java/org/apache/hadoop/hbase/CoprocessorEnvironment.java
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/CoprocessorEnvironment.java
@@ -20,7 +20,7 @@ import java.util.concurrent.ExecutorService;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.client.HTableInterface;
+import org.apache.hadoop.hbase.client.Table;
/**
* Coprocessor environment state.
@@ -50,14 +50,14 @@ public interface CoprocessorEnvironment {
* @return an interface for accessing the given table
* @throws IOException
*/
- HTableInterface getTable(TableName tableName) throws IOException;
+ Table getTable(TableName tableName) throws IOException;
/**
* @return an interface for accessing the given table using the passed executor to run batch
* operations
* @throws IOException
*/
- HTableInterface getTable(TableName tableName, ExecutorService service) throws IOException;
+ Table getTable(TableName tableName, ExecutorService service) throws IOException;
/**
* @return the classloader for the loaded coprocessor instance
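 With this change a coprocessor only ever sees the Table interface from its environment. A minimal call-site sketch (the table name, row, and values are placeholders, not part of this patch): // env is the CoprocessorEnvironment handed to the coprocessor. Table t = env.getTable(TableName.valueOf("mytable")); try { t.put(new Put(Bytes.toBytes("r1")) .add(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v"))); } finally { t.close(); // the host tracks the wrapper, but callers should still close it } 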
diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
index 2e6723a..8297ff8 100644
--- hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
@@ -45,6 +45,7 @@ import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Mutation;
+import org.apache.hadoop.hbase.client.NeedUnmanagedConnectionException;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.RegionReplicaUtil;
@@ -241,7 +242,7 @@ public class MetaTableAccessor {
// There should still be a way to use this method with an unmanaged connection.
if (connection instanceof ClusterConnection) {
if (((ClusterConnection) connection).isManaged()) {
- return new HTable(TableName.META_TABLE_NAME, connection);
+ throw new NeedUnmanagedConnectionException();
}
}
return connection.getTable(TableName.META_TABLE_NAME);
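 Callers holding a managed connection now fail fast with NeedUnmanagedConnectionException instead of getting a raw HTable built over that connection. A sketch of the caller-side fix, assuming the caller can create its own unmanaged connection (both resources are Closeable): try (Connection connection = ConnectionFactory.createConnection(conf); Table meta = connection.getTable(TableName.META_TABLE_NAME)) { // read hbase:meta through the Table interface } 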
diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
index d11eadd..953fb46 100644
--- hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
@@ -127,78 +127,6 @@ public class HTable implements HTableInterface {
private RpcRetryingCallerFactory rpcCallerFactory;
private RpcControllerFactory rpcControllerFactory;
- /**
- * Creates an object to access a HBase table.
- * @param conf Configuration object to use.
- * @param tableName Name of the table.
- * @throws IOException if a remote or network exception occurs
- * @deprecated Constructing HTable objects manually has been deprecated. Please use
- * {@link Connection} to instantiate a {@link Table} instead.
- */
- @Deprecated
- public HTable(Configuration conf, final String tableName)
- throws IOException {
- this(conf, TableName.valueOf(tableName));
- }
-
- /**
- * Creates an object to access a HBase table.
- * @param conf Configuration object to use.
- * @param tableName Name of the table.
- * @throws IOException if a remote or network exception occurs
- * @deprecated Constructing HTable objects manually has been deprecated. Please use
- * {@link Connection} to instantiate a {@link Table} instead.
- */
- @Deprecated
- public HTable(Configuration conf, final byte[] tableName)
- throws IOException {
- this(conf, TableName.valueOf(tableName));
- }
-
- /**
- * Creates an object to access a HBase table.
- * @param conf Configuration object to use.
- * @param tableName table name pojo
- * @throws IOException if a remote or network exception occurs
- * @deprecated Constructing HTable objects manually has been deprecated. Please use
- * {@link Connection} to instantiate a {@link Table} instead.
- */
- @Deprecated
- public HTable(Configuration conf, final TableName tableName)
- throws IOException {
- this.tableName = tableName;
- this.cleanupPoolOnClose = true;
- this.cleanupConnectionOnClose = true;
- if (conf == null) {
- this.connection = null;
- return;
- }
- this.connection = (ClusterConnection) ConnectionFactory.createConnection(conf);
- this.configuration = conf;
-
- this.pool = getDefaultExecutor(conf);
- this.finishSetup();
- }
-
- /**
- * Creates an object to access a HBase table.
- * @param tableName Name of the table.
- * @param connection HConnection to be used.
- * @throws IOException if a remote or network exception occurs
- * @deprecated Do not use.
- */
- @Deprecated
- public HTable(TableName tableName, Connection connection) throws IOException {
- this.tableName = tableName;
- this.cleanupPoolOnClose = true;
- this.cleanupConnectionOnClose = false;
- this.connection = (ClusterConnection)connection;
- this.configuration = connection.getConfiguration();
-
- this.pool = getDefaultExecutor(this.configuration);
- this.finishSetup();
- }
-
// Marked Private @since 1.0
@InterfaceAudience.Private
public static ThreadPoolExecutor getDefaultExecutor(Configuration conf) {
@@ -220,68 +148,6 @@ public class HTable implements HTableInterface {
/**
* Creates an object to access a HBase table.
- * @param conf Configuration object to use.
- * @param tableName Name of the table.
- * @param pool ExecutorService to be used.
- * @throws IOException if a remote or network exception occurs
- * @deprecated Constructing HTable objects manually has been deprecated. Please use
- * {@link Connection} to instantiate a {@link Table} instead.
- */
- @Deprecated
- public HTable(Configuration conf, final byte[] tableName, final ExecutorService pool)
- throws IOException {
- this(conf, TableName.valueOf(tableName), pool);
- }
-
- /**
- * Creates an object to access a HBase table.
- * @param conf Configuration object to use.
- * @param tableName Name of the table.
- * @param pool ExecutorService to be used.
- * @throws IOException if a remote or network exception occurs
- * @deprecated Constructing HTable objects manually has been deprecated. Please use
- * {@link Connection} to instantiate a {@link Table} instead.
- */
- @Deprecated
- public HTable(Configuration conf, final TableName tableName, final ExecutorService pool)
- throws IOException {
- this.connection = (ClusterConnection) ConnectionFactory.createConnection(conf);
- this.configuration = conf;
- this.pool = pool;
- if (pool == null) {
- this.pool = getDefaultExecutor(conf);
- this.cleanupPoolOnClose = true;
- } else {
- this.cleanupPoolOnClose = false;
- }
- this.tableName = tableName;
- this.cleanupConnectionOnClose = true;
- this.finishSetup();
- }
-
- /**
- * Creates an object to access a HBase table.
- * @param tableName Name of the table.
- * @param connection HConnection to be used.
- * @param pool ExecutorService to be used.
- * @throws IOException if a remote or network exception occurs.
- * @deprecated Do not use, internal ctor.
- */
- @Deprecated
- public HTable(final byte[] tableName, final Connection connection,
- final ExecutorService pool) throws IOException {
- this(TableName.valueOf(tableName), connection, pool);
- }
-
- /** @deprecated Do not use, internal ctor. */
- @Deprecated
- public HTable(TableName tableName, final Connection connection,
- final ExecutorService pool) throws IOException {
- this(tableName, (ClusterConnection)connection, null, null, null, pool);
- }
-
- /**
- * Creates an object to access a HBase table.
* Used by HBase internally. DO NOT USE. See {@link ConnectionFactory} class comment for how to
* get a {@link Table} instance (use {@link Table} instead of {@link HTable}).
* @param tableName Name of the table.
@@ -290,7 +156,7 @@ public class HTable implements HTableInterface {
* @throws IOException if a remote or network exception occurs
*/
@InterfaceAudience.Private
- public HTable(TableName tableName, final ClusterConnection connection,
+ protected HTable(TableName tableName, final ClusterConnection connection,
final TableConfiguration tableConfig,
final RpcRetryingCallerFactory rpcCallerFactory,
final RpcControllerFactory rpcControllerFactory,
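 Every constructor removed above carried the same deprecation note: go through Connection instead. The migration pattern, sketched with a placeholder table name ("tableA"): // Before (removed above): // HTable t = new HTable(conf, "tableA"); // After: the caller now owns the connection and must close it. Connection connection = ConnectionFactory.createConnection(conf); Table t = connection.getTable(TableName.valueOf("tableA")); try { // ... use t ... } finally { t.close(); connection.close(); } 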
diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java
index 7d91dbb..15247ff 100644
--- hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java
@@ -64,7 +64,7 @@ public final class MetaScanner {
/**
 * Scans the meta table and calls a visitor on each RowResult and uses an empty 
* start row value as table name.
- *
+ *
 *

Visible for testing. Use {@link 
* #metaScan(Connection, MetaScannerVisitor, TableName)} instead.
*
@@ -97,7 +97,7 @@ public final class MetaScanner {
* Scans the meta table and calls a visitor on each RowResult. Uses a table
 * name and a row name to locate meta regions. It scans at most rowLimit * rows. 
- *
+ *
 *

Visible for testing. Use {@link 
* #metaScan(Connection, MetaScannerVisitor, TableName)} instead.
*
@@ -150,7 +150,8 @@ public final class MetaScanner {
// managed connections getting tables. Leaving this as it is for now. Will
 // revisit when inclined to change all tests. User code probably makes use of 
// managed connections too so don't change it till post hbase 1.0.
- try (Table metaTable = new HTable(TableName.META_TABLE_NAME, connection, null)) {
+
+ try (Table metaTable = connection.getTable(TableName.META_TABLE_NAME, null)) {
if (row != null) {
// Scan starting at a particular row in a particular table
Result startRowResult = getClosestRowOrBefore(metaTable, tableName, row,
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/client/HTableWrapper.java hbase-server/src/main/java/org/apache/hadoop/hbase/client/HTableWrapper.java
index c16b4c3..4e5073f 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/client/HTableWrapper.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/client/HTableWrapper.java
@@ -58,23 +58,23 @@ import com.google.protobuf.ServiceException;
*/
@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC)
@InterfaceStability.Stable
-public final class HTableWrapper implements HTableInterface {
+public final class HTableWrapper implements Table {
- private final HTableInterface table;
+ private final Table table;
private ClusterConnection connection;
 - private final List<HTableInterface> openTables; + private final List<Table> openTables; 
/**
* @param openTables External list of tables used for tracking wrappers.
* @throws IOException
*/
 - public static HTableInterface createWrapper(List<HTableInterface> openTables, + public static Table createWrapper(List<Table> openTables, 
TableName tableName, Environment env, ExecutorService pool) throws IOException {
return new HTableWrapper(openTables, tableName,
CoprocessorHConnection.getConnectionForEnvironment(env), pool);
}
 - private HTableWrapper(List<HTableInterface> openTables, TableName tableName, + private HTableWrapper(List<Table> openTables, TableName tableName, 
ClusterConnection connection, ExecutorService pool)
throws IOException {
this.table = connection.getTable(tableName, pool);
@@ -215,14 +215,6 @@ public final class HTableWrapper implements HTableInterface {
return table.increment(increment);
}
- public void flushCommits() throws IOException {
- table.flushCommits();
- }
-
- public boolean isAutoFlush() {
- return table.isAutoFlush();
- }
-
public ResultScanner getScanner(Scan scan) throws IOException {
return table.getScanner(scan);
}
@@ -241,11 +233,6 @@ public final class HTableWrapper implements HTableInterface {
}
@Override
- public byte[] getTableName() {
- return table.getTableName();
- }
-
- @Override
public TableName getName() {
return table.getName();
}
@@ -317,30 +304,6 @@ public final class HTableWrapper implements HTableInterface {
}
@Override
- public void setAutoFlush(boolean autoFlush) {
- table.setAutoFlush(autoFlush);
- }
-
- @Override
- public void setAutoFlush(boolean autoFlush, boolean clearBufferOnFail) {
- setAutoFlush(autoFlush);
- if (!autoFlush && !clearBufferOnFail) {
 - // We don't support this combination. In HTable, the implementation is this: 
- //
- // this.clearBufferOnFail = autoFlush || clearBufferOnFail
- //
- // So if autoFlush == false and clearBufferOnFail is false, that is not supported in
- // the new Table Interface so just throwing UnsupportedOperationException here.
- throw new UnsupportedOperationException("Can't do this via wrapper");
- }
- }
-
- @Override
- public void setAutoFlushTo(boolean autoFlush) {
- table.setAutoFlushTo(autoFlush);
- }
-
- @Override
public long getWriteBufferSize() {
return table.getWriteBufferSize();
}
@@ -351,13 +314,6 @@ public final class HTableWrapper implements HTableInterface {
}
@Override
- public long incrementColumnValue(byte[] row, byte[] family,
- byte[] qualifier, long amount, boolean writeToWAL) throws IOException {
- return table.incrementColumnValue(row, family, qualifier, amount,
- writeToWAL? Durability.USE_DEFAULT: Durability.SKIP_WAL);
- }
-
- @Override
 public <R extends Message> Map<byte[], R> batchCoprocessorService( 
MethodDescriptor methodDescriptor, Message request, byte[] startKey, byte[] endKey,
R responsePrototype) throws ServiceException, Throwable {
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java
index 237f617..b047d33 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java
@@ -46,8 +46,8 @@ import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.HTable;
-import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.HTableWrapper;
+import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.CoprocessorClassLoader;
import org.apache.hadoop.hbase.util.SortedCopyOnWriteSet;
import org.apache.hadoop.hbase.util.VersionInfo;
@@ -377,8 +377,8 @@ public abstract class CoprocessorHost {
/** Current coprocessor state */
Coprocessor.State state = Coprocessor.State.UNINSTALLED;
/** Accounting for tables opened by the coprocessor */
 - protected List<HTableInterface> openTables = - Collections.synchronizedList(new ArrayList<HTableInterface>()); + protected List<Table> openTables = + Collections.synchronizedList(new ArrayList<Table>()); 
private int seq;
private Configuration conf;
private ClassLoader classLoader;
@@ -438,7 +438,7 @@ public abstract class CoprocessorHost {
" because not active (state="+state.toString()+")");
}
// clean up any table references
- for (HTableInterface table: openTables) {
+ for (Table table: openTables) {
try {
((HTableWrapper)table).internalClose();
} catch (IOException e) {
@@ -493,7 +493,7 @@ public abstract class CoprocessorHost {
* @exception java.io.IOException Exception
*/
@Override
- public HTableInterface getTable(TableName tableName) throws IOException {
+ public Table getTable(TableName tableName) throws IOException {
return this.getTable(tableName, HTable.getDefaultExecutor(getConfiguration()));
}
@@ -504,7 +504,7 @@ public abstract class CoprocessorHost {
* @exception java.io.IOException Exception
*/
@Override
- public HTableInterface getTable(TableName tableName, ExecutorService pool) throws IOException {
+ public Table getTable(TableName tableName, ExecutorService pool) throws IOException {
return HTableWrapper.createWrapper(openTables, tableName, this, pool);
}
}
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java
index dd58d5c..f8ccea3 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.filter.Filter;
@@ -83,7 +84,8 @@ public abstract class TableInputFormatBase
 implements InputFormat<ImmutableBytesWritable, Result> { 
private static final Log LOG = LogFactory.getLog(TableInputFormatBase.class);
private byte [][] inputColumns;
- private HTable table;
+ private Table table;
+ private RegionLocator regionLocator;
private Connection connection;
private TableRecordReader tableRecordReader;
private Filter rowFilter;
@@ -197,7 +199,7 @@ implements InputFormat {
throw new IOException(INITIALIZATION_ERROR, exception);
}
- byte [][] startKeys = this.table.getStartKeys();
+ byte [][] startKeys = this.regionLocator.getStartKeys();
if (startKeys == null || startKeys.length == 0) {
throw new IOException("Expecting at least one region");
}
@@ -212,7 +214,7 @@ implements InputFormat {
for (int i = 0; i < realNumSplits; i++) {
int lastPos = startPos + middle;
lastPos = startKeys.length % realNumSplits > i ? lastPos + 1 : lastPos;
- String regionLocation = table.getRegionLocation(startKeys[startPos]).
+ String regionLocation = regionLocator.getRegionLocation(startKeys[startPos]).
getHostname();
splits[i] = new TableSplit(this.table.getName(),
startKeys[startPos], ((i + 1) < realNumSplits) ? startKeys[lastPos]:
@@ -235,7 +237,8 @@ implements InputFormat {
LOG.warn("initializeTable called multiple times. Overwriting connection and table " +
"reference; TableInputFormatBase will not close these old references when done.");
}
- this.table = (HTable) connection.getTable(tableName);
+ this.table = connection.getTable(tableName);
+ this.regionLocator = connection.getRegionLocator(tableName);
this.connection = connection;
}
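 Region metadata lookups move from HTable to RegionLocator. A sketch of the pattern getSplits() now follows, assuming the Connection and TableName passed to initializeTable() above: RegionLocator locator = connection.getRegionLocator(tableName); byte[][] startKeys = locator.getStartKeys(); String host = locator.getRegionLocation(startKeys[0]).getHostname(); 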
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/util/ConnectionCache.java hbase-server/src/main/java/org/apache/hadoop/hbase/util/ConnectionCache.java
index 21714af..8fa711c 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/util/ConnectionCache.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/util/ConnectionCache.java
@@ -31,7 +31,7 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.HConnectionManager;
-import org.apache.hadoop.hbase.client.HTableInterface;
+import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.security.UserProvider;
import org.apache.hadoop.security.UserGroupInformation;
@@ -146,7 +146,7 @@ public class ConnectionCache {
/**
* Caller closes the table afterwards.
*/
- public HTableInterface getTable(String tableName) throws IOException {
+ public Table getTable(String tableName) throws IOException {
ConnectionInfo connInfo = getCurrentConnection();
return connInfo.connection.getTable(tableName);
}
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
index 2ef7451..ba4a7f9 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
@@ -1023,7 +1023,8 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
}
this.hbaseCluster = new MiniHBaseCluster(this.conf, servers);
// Don't leave here till we've done a successful scan of the hbase:meta
- Table t = new HTable(new Configuration(this.conf), TableName.META_TABLE_NAME);
+ Connection conn = ConnectionFactory.createConnection(this.conf);
+ Table t = conn.getTable(TableName.META_TABLE_NAME);
ResultScanner s = t.getScanner(new Scan());
while (s.next() != null) {
// do nothing
@@ -1031,6 +1032,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
LOG.info("HBase has been restarted");
s.close();
t.close();
+ conn.close();
}
/**
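 The explicit conn.close() above works; the same meta scan can also be written with try-with-resources so connection, table, and scanner are closed on any exit path. A sketch, not part of the patch: try (Connection conn = ConnectionFactory.createConnection(this.conf); Table t = conn.getTable(TableName.META_TABLE_NAME); ResultScanner s = t.getScanner(new Scan())) { while (s.next() != null) { // drain the scanner to confirm hbase:meta is readable } } 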
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestHTableWrapper.java hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestHTableWrapper.java
index 317707a..986b9fb 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestHTableWrapper.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestHTableWrapper.java
@@ -32,7 +32,6 @@ import org.apache.hadoop.hbase.client.Append;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
@@ -50,6 +49,7 @@ import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.VersionInfo;
import org.junit.After;
import org.junit.AfterClass;
+import org.junit.Assert;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
@@ -86,7 +86,7 @@ public class TestHTableWrapper {
static class DummyRegionObserver extends BaseRegionObserver {
}
- private HTableInterface hTableInterface;
+ private Table hTableInterface;
private Table table;
@BeforeClass
@@ -144,10 +144,8 @@ public class TestHTableWrapper {
private void checkHTableInterfaceMethods() throws Exception {
checkConf();
checkNameAndDescriptor();
- checkAutoFlush();
checkBufferSize();
checkExists();
- checkGetRowOrBefore();
checkAppend();
checkPutsAndDeletes();
checkCheckAndPut();
@@ -159,7 +157,6 @@ public class TestHTableWrapper {
checkMutateRow();
checkResultScanner();
- hTableInterface.flushCommits();
hTableInterface.close();
}
@@ -174,15 +171,6 @@ public class TestHTableWrapper {
assertEquals(table.getTableDescriptor(), hTableInterface.getTableDescriptor());
}
- private void checkAutoFlush() {
- boolean initialAutoFlush = hTableInterface.isAutoFlush();
- hTableInterface.setAutoFlush(false);
- assertFalse(hTableInterface.isAutoFlush());
- hTableInterface.setAutoFlush(true);
- assertTrue(hTableInterface.isAutoFlush());
- hTableInterface.setAutoFlush(initialAutoFlush);
- }
-
private void checkBufferSize() throws IOException {
long initialWriteBufferSize = hTableInterface.getWriteBufferSize();
hTableInterface.setWriteBufferSize(12345L);
@@ -194,19 +182,12 @@ public class TestHTableWrapper {
boolean ex = hTableInterface.exists(new Get(ROW_A).addColumn(TEST_FAMILY, qualifierCol1));
assertTrue(ex);
- Boolean[] exArray = hTableInterface.exists(Arrays.asList(new Get[] {
- new Get(ROW_A).addColumn(TEST_FAMILY, qualifierCol1),
- new Get(ROW_B).addColumn(TEST_FAMILY, qualifierCol1),
- new Get(ROW_C).addColumn(TEST_FAMILY, qualifierCol1),
- new Get(Bytes.toBytes("does not exist")).addColumn(TEST_FAMILY, qualifierCol1), }));
- assertArrayEquals(new Boolean[] { Boolean.TRUE, Boolean.TRUE, Boolean.TRUE, Boolean.FALSE },
- exArray);
- }
-
- @SuppressWarnings("deprecation")
- private void checkGetRowOrBefore() throws IOException {
- Result rowOrBeforeResult = hTableInterface.getRowOrBefore(ROW_A, TEST_FAMILY);
- assertArrayEquals(ROW_A, rowOrBeforeResult.getRow());
+ boolean[] exArray = hTableInterface.existsAll(Arrays.asList(new Get[]{
+ new Get(ROW_A).addColumn(TEST_FAMILY, qualifierCol1),
+ new Get(ROW_B).addColumn(TEST_FAMILY, qualifierCol1),
+ new Get(ROW_C).addColumn(TEST_FAMILY, qualifierCol1),
+ new Get(Bytes.toBytes("does not exist")).addColumn(TEST_FAMILY, qualifierCol1),}));
 + Assert.assertTrue(Arrays.equals(new boolean[] { true, true, true, false }, exArray)); 
}
private void checkAppend() throws IOException {
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/mapred/TestTableInputFormat.java hbase-server/src/test/java/org/apache/hadoop/hbase/mapred/TestTableInputFormat.java
index d7dd8ec..3497cdf 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/mapred/TestTableInputFormat.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/mapred/TestTableInputFormat.java
@@ -403,10 +403,10 @@ public class TestTableInputFormat {
@Override
public void configure(JobConf job) {
try {
- HTable exampleTable = new HTable(HBaseConfiguration.create(job),
- Bytes.toBytes("exampleDeprecatedTable"));
+ Connection connection = ConnectionFactory.createConnection(job);
+ Table exampleTable = connection.getTable(TableName.valueOf("exampleDeprecatedTable"));
// mandatory
- setHTable(exampleTable);
+ initializeTable(connection, exampleTable.getName());
byte[][] inputColumns = new byte [][] { Bytes.toBytes("columnA"),
Bytes.toBytes("columnB") };
// mandatory
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormat.java hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormat.java
index 566a642..bc2d08f 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormat.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormat.java
@@ -412,10 +412,10 @@ public class TestTableInputFormat {
@Override
public void configure(JobConf job) {
try {
- HTable exampleTable = new HTable(HBaseConfiguration.create(job),
- Bytes.toBytes("exampleDeprecatedTable"));
+ Connection connection = ConnectionFactory.createConnection(job);
 + Table exampleTable = connection.getTable(TableName.valueOf("exampleDeprecatedTable")); 
// mandatory
- setHTable(exampleTable);
+ initializeTable(connection, exampleTable.getName());
byte[][] inputColumns = new byte [][] { Bytes.toBytes("columnA"),
Bytes.toBytes("columnB") };
// optional
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java
index 06f9eb8..9d14be6 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java
@@ -57,6 +57,8 @@ import org.apache.hadoop.hbase.UnknownRegionException;
import org.apache.hadoop.hbase.Waiter;
import org.apache.hadoop.hbase.ZooKeeperConnectionException;
import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Consistency;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
@@ -958,7 +960,8 @@ public class TestSplitTransactionOnCluster {
HTableDescriptor desc = new HTableDescriptor(table);
desc.addFamily(new HColumnDescriptor(Bytes.toBytes("f")));
admin.createTable(desc);
- HTable hTable = new HTable(cluster.getConfiguration(), desc.getTableName());
+ Connection connection = ConnectionFactory.createConnection(cluster.getConfiguration());
+ HTable hTable = (HTable) connection.getTable(desc.getTableName());
for(int i = 1; i < 5; i++) {
Put p1 = new Put(("r"+i).getBytes());
p1.add(Bytes.toBytes("f"), "q1".getBytes(), "v".getBytes());
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMasterReplication.java hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMasterReplication.java
index a501af9..0ec410e 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMasterReplication.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMasterReplication.java
@@ -37,11 +37,11 @@ import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
@@ -350,7 +350,7 @@ public class TestMasterReplication {
int numClusters = utilities.length;
Table[] htables = new Table[numClusters];
for (int i = 0; i < numClusters; i++) {
- Table htable = new HTable(configurations[i], tableName);
+ Table htable = ConnectionFactory.createConnection(configurations[i]).getTable(tableName);
htable.setWriteBufferSize(1024);
htables[i] = htable;
}
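 Each loop iteration above opens a Connection that the test never closes. A leak-free variant keeps the connections next to the tables; a sketch (the connections array is hypothetical): Connection[] connections = new Connection[numClusters]; for (int i = 0; i < numClusters; i++) { connections[i] = ConnectionFactory.createConnection(configurations[i]); htables[i] = connections[i].getTable(tableName); htables[i].setWriteBufferSize(1024); } // teardown: close each Table, then its Connection 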
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/ExpAsStringVisibilityLabelServiceImpl.java hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/ExpAsStringVisibilityLabelServiceImpl.java
index 6f46fd3..b657df3 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/ExpAsStringVisibilityLabelServiceImpl.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/ExpAsStringVisibilityLabelServiceImpl.java
 @@ -22,6 +22,8 @@ import static org.apache.hadoop.hbase.security.visibility.VisibilityConstants.LA 
import static org.apache.hadoop.hbase.security.visibility.VisibilityConstants.LABELS_TABLE_NAME;
import static org.apache.hadoop.hbase.security.visibility.VisibilityUtils.SYSTEM_LABEL;
 +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; 
import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
 @@ -195,13 +196,18 @@ public class ExpAsStringVisibilityLabelServiceImpl implements VisibilityLabelSer 
 List<Cell> cells = null; 
if (labelsRegion == null) {
Table table = null;
+ Connection connection = null;
try {
- table = new HTable(conf, VisibilityConstants.LABELS_TABLE_NAME);
+ connection = ConnectionFactory.createConnection(conf);
+ table = connection.getTable(VisibilityConstants.LABELS_TABLE_NAME);
Result result = table.get(get);
cells = result.listCells();
} finally {
if (table != null) {
table.close();
 + } + if (connection != null) { + connection.close(); 
}
}
} else {
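 With the finally block closing the connection independently of the table, the same lookup also reads compactly as try-with-resources. A sketch: try (Connection connection = ConnectionFactory.createConnection(conf); Table table = connection.getTable(VisibilityConstants.LABELS_TABLE_NAME)) { cells = table.get(get).listCells(); } 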
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java
index d635ce0..31eddb3 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java
@@ -1220,7 +1220,7 @@ public class TestHBaseFsck {
HTableDescriptor desc = new HTableDescriptor(table);
desc.addFamily(new HColumnDescriptor(Bytes.toBytes("f")));
admin.createTable(desc);
- tbl = new HTable(cluster.getConfiguration(), desc.getTableName());
+ tbl = (HTable) connection.getTable(desc.getTableName());
for (int i = 0; i < 5; i++) {
Put p1 = new Put(("r" + i).getBytes());
p1.add(Bytes.toBytes("f"), "q1".getBytes(), "v".getBytes());
diff --git hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/HTablePool.java hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/HTablePool.java
index e9c9e1f..2d1addb 100644
--- hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/HTablePool.java
+++ hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/HTablePool.java
@@ -30,12 +30,10 @@ import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Append;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.HTableFactory;
-import org.apache.hadoop.hbase.client.HTableInterface;
-import org.apache.hadoop.hbase.client.HTableInterfaceFactory;
import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
@@ -43,6 +41,7 @@ import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Row;
import org.apache.hadoop.hbase.client.RowMutations;
import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.coprocessor.Batch;
import org.apache.hadoop.hbase.client.coprocessor.Batch.Callback;
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
@@ -66,10 +65,10 @@ import com.google.protobuf.ServiceException;
* HTableInterface.close() rather than returning the tables to the pool
*
* Once you are done with it, close your instance of
- * {@link org.apache.hadoop.hbase.client.HTableInterface}
- * by calling {@link org.apache.hadoop.hbase.client.HTableInterface#close()} rather than returning
+ * {@link org.apache.hadoop.hbase.client.Table}
+ * by calling {@link org.apache.hadoop.hbase.client.Table#close()} rather than returning
 * the tables to the pool with (deprecated) 
- * {@link #putTable(org.apache.hadoop.hbase.client.HTableInterface)}.
+ * {@link #putTable(org.apache.hadoop.hbase.client.Table)}.
*
*
* A pool can be created with a maxSize which defines the most HTable
@@ -85,11 +84,10 @@ import com.google.protobuf.ServiceException;
*/
@InterfaceAudience.Private
public class HTablePool implements Closeable {
 - private final PoolMap<String, HTableInterface> tables; + private final PoolMap<String, Table> tables; 
private final int maxSize;
private final PoolType poolType;
private final Configuration config;
- private final HTableInterfaceFactory tableFactory;
/**
* Default Constructor. Default HBaseConfiguration and no limit on pool size.
@@ -99,18 +97,6 @@ public class HTablePool implements Closeable {
}
/**
- * Constructor to set maximum versions and use the specified configuration.
- *
- * @param config
- * configuration
- * @param maxSize
- * maximum number of references to keep for each table
- */
- public HTablePool(final Configuration config, final int maxSize) {
- this(config, maxSize, null, null);
- }
-
- /**
* Constructor to set maximum versions and use the specified configuration and
* table factory.
*
@@ -118,29 +104,9 @@ public class HTablePool implements Closeable {
* configuration
* @param maxSize
* maximum number of references to keep for each table
- * @param tableFactory
- * table factory
- */
- public HTablePool(final Configuration config, final int maxSize,
- final HTableInterfaceFactory tableFactory) {
- this(config, maxSize, tableFactory, PoolType.Reusable);
- }
-
- /**
- * Constructor to set maximum versions and use the specified configuration and
- * pool type.
- *
- * @param config
- * configuration
- * @param maxSize
- * maximum number of references to keep for each table
- * @param poolType
- * pool type which is one of {@link PoolType#Reusable} or
- * {@link PoolType#ThreadLocal}
*/
- public HTablePool(final Configuration config, final int maxSize,
- final PoolType poolType) {
- this(config, maxSize, null, poolType);
+ public HTablePool(final Configuration config, final int maxSize) {
+ this(config, maxSize, PoolType.Reusable);
}
/**
@@ -154,20 +120,16 @@ public class HTablePool implements Closeable {
* configuration
* @param maxSize
* maximum number of references to keep for each table
- * @param tableFactory
- * table factory
* @param poolType
* pool type which is one of {@link PoolType#Reusable} or
* {@link PoolType#ThreadLocal}
*/
public HTablePool(final Configuration config, final int maxSize,
- final HTableInterfaceFactory tableFactory, PoolType poolType) {
+ PoolType poolType) {
// Make a new configuration instance so I can safely cleanup when
// done with the pool.
this.config = config == null ? HBaseConfiguration.create() : config;
this.maxSize = maxSize;
- this.tableFactory = tableFactory == null ? new HTableFactory()
- : tableFactory;
if (poolType == null) {
this.poolType = PoolType.Reusable;
} else {
@@ -181,7 +143,7 @@ public class HTablePool implements Closeable {
break;
}
}
 - this.tables = new PoolMap<String, HTableInterface>(this.poolType, + this.tables = new PoolMap<String, Table>(this.poolType, 
this.maxSize);
}
@@ -196,9 +158,9 @@ public class HTablePool implements Closeable {
* @throws RuntimeException
* if there is a problem instantiating the HTable
*/
- public HTableInterface getTable(String tableName) {
+ public Table getTable(String tableName) {
// call the old getTable implementation renamed to findOrCreateTable
- HTableInterface table = findOrCreateTable(tableName);
+ Table table = findOrCreateTable(tableName);
// return a proxy table so when user closes the proxy, the actual table
// will be returned to the pool
return new PooledHTable(table);
@@ -216,8 +178,8 @@ public class HTablePool implements Closeable {
* @throws RuntimeException
* if there is a problem instantiating the HTable
*/
- private HTableInterface findOrCreateTable(String tableName) {
- HTableInterface table = tables.get(tableName);
+ private Table findOrCreateTable(String tableName) {
+ Table table = tables.get(tableName);
if (table == null) {
table = createHTable(tableName);
}
@@ -235,7 +197,7 @@ public class HTablePool implements Closeable {
* @return a reference to the specified table
* @throws RuntimeException if there is a problem instantiating the HTable
*/
- public HTableInterface getTable(byte[] tableName) {
+ public Table getTable(byte[] tableName) {
return getTable(Bytes.toString(tableName));
}
@@ -248,7 +210,7 @@ public class HTablePool implements Closeable {
* @deprecated
*/
@Deprecated
- public void putTable(HTableInterface table) throws IOException {
+ public void putTable(Table table) throws IOException {
// we need to be sure nobody puts a proxy implementation in the pool
// but if the client code is not updated
// and it will continue to call putTable() instead of calling close()
@@ -276,21 +238,25 @@ public class HTablePool implements Closeable {
* @param table
* table
*/
- private void returnTable(HTableInterface table) throws IOException {
+ private void returnTable(Table table) throws IOException {
// this is the old putTable method renamed and made private
- String tableName = Bytes.toString(table.getTableName());
 + String tableName = table.getName().getNameAsString(); 
if (tables.size(tableName) >= maxSize) {
// release table instance since we're not reusing it
this.tables.removeValue(tableName, table);
- this.tableFactory.releaseHTableInterface(table);
+ table.close();
return;
}
tables.put(tableName, table);
}
- protected HTableInterface createHTable(String tableName) {
- return this.tableFactory.createHTableInterface(config,
- Bytes.toBytes(tableName));
+ protected Table createHTable(String tableName) {
+ try {
+ return ConnectionFactory.createConnection(config).getTable(TableName.valueOf(tableName));
+ } catch (IOException e) {
 + // getTable()'s contract above documents RuntimeException on failure + throw new RuntimeException(e); 
+ }
}
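 Note that createHTable() now opens a fresh Connection per pooled table and nothing closes that connection when the table is released; connections are heavyweight and normally shared, so a production version would likely hold one connection for the pool's lifetime. Usage of the reworked pool, sketched with a placeholder table name: HTablePool pool = new HTablePool(conf, 10, PoolType.Reusable); Table t = pool.getTable("tableA"); // returns a PooledHTable proxy try { // ... use t ... } finally { t.close(); // the proxy's close() returns the table to the pool } 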
/**
@@ -304,10 +270,10 @@ public class HTablePool implements Closeable {
* @param tableName
*/
public void closeTablePool(final String tableName) throws IOException {
 - Collection<HTableInterface> tables = this.tables.values(tableName); + Collection<Table> tables = this.tables.values(tableName); 
if (tables != null) {
- for (HTableInterface table : tables) {
- this.tableFactory.releaseHTableInterface(table);
+ for (Table table : tables) {
+ table.close();
}
}
this.tables.remove(tableName);
@@ -340,28 +306,22 @@ public class HTablePool implements Closeable {
}
/**
- * A proxy class that implements HTableInterface.close method to return the
+ * A proxy class that implements Table close method to return the
* wrapped table back to the table pool
*
*/
- class PooledHTable implements HTableInterface {
+ class PooledHTable implements Table {
private boolean open = false;
- private HTableInterface table; // actual table implementation
+ private Table table; // actual table implementation
- public PooledHTable(HTableInterface table) {
+ public PooledHTable(Table table) {
this.table = table;
this.open = true;
}
@Override
- public byte[] getTableName() {
- checkState();
- return table.getTableName();
- }
-
- @Override
public TableName getName() {
return table.getName();
}
@@ -391,12 +351,6 @@ public class HTablePool implements Closeable {
}
@Override
 - public Boolean[] exists(List<Get> gets) throws IOException { 
- checkState();
- return table.exists(gets);
- }
-
- @Override
 public void batch(List<? extends Row> actions, Object[] results) throws IOException, 
InterruptedException {
checkState();
@@ -429,14 +383,6 @@ public class HTablePool implements Closeable {
}
@Override
- @SuppressWarnings("deprecation")
- @Deprecated
- public Result getRowOrBefore(byte[] row, byte[] family) throws IOException {
- checkState();
- return table.getRowOrBefore(row, family);
- }
-
- @Override
public ResultScanner getScanner(Scan scan) throws IOException {
checkState();
return table.getScanner(scan);
@@ -528,18 +474,6 @@ public class HTablePool implements Closeable {
durability);
}
- @Override
- public boolean isAutoFlush() {
- checkState();
- return table.isAutoFlush();
- }
-
- @Override
- public void flushCommits() throws IOException {
- checkState();
- table.flushCommits();
- }
-
/**
* Returns the actual table back to the pool
*
@@ -583,7 +517,7 @@ public class HTablePool implements Closeable {
*
* @return wrapped htable
*/
- HTableInterface getWrappedTable() {
+ Table getWrappedTable() {
return table;
}
@@ -623,23 +557,6 @@ public class HTablePool implements Closeable {
}
@Override
- public void setAutoFlush(boolean autoFlush) {
- checkState();
- table.setAutoFlush(autoFlush, autoFlush);
- }
-
- @Override
- public void setAutoFlush(boolean autoFlush, boolean clearBufferOnFail) {
- checkState();
- table.setAutoFlush(autoFlush, clearBufferOnFail);
- }
-
- @Override
- public void setAutoFlushTo(boolean autoFlush) {
- table.setAutoFlushTo(autoFlush);
- }
-
- @Override
public long getWriteBufferSize() {
checkState();
return table.getWriteBufferSize();
@@ -663,12 +580,6 @@ public class HTablePool implements Closeable {
}
@Override
- public long incrementColumnValue(byte[] row, byte[] family,
- byte[] qualifier, long amount, boolean writeToWAL) throws IOException {
- return table.incrementColumnValue(row, family, qualifier, amount, writeToWAL);
- }
-
- @Override
 public <R extends Message> Map<byte[], R> batchCoprocessorService( 
Descriptors.MethodDescriptor method, Message request,
byte[] startKey, byte[] endKey, R responsePrototype) throws ServiceException, Throwable {
diff --git hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftHBaseServiceHandler.java hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftHBaseServiceHandler.java
index 41305a6..720a308 100644
--- hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftHBaseServiceHandler.java
+++ hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftHBaseServiceHandler.java
@@ -40,8 +40,6 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.client.HTableFactory;
-import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.security.UserProvider;
@@ -74,7 +72,6 @@ public class ThriftHBaseServiceHandler implements THBaseService.Iface {
 new ConcurrentHashMap<Integer, ResultScanner>(); 
private final ConnectionCache connectionCache;
- private final HTableFactory tableFactory;
private final int maxPoolSize;
static final String CLEANUP_INTERVAL = "hbase.thrift.connection.cleanup-interval";
@@ -122,23 +119,12 @@ public class ThriftHBaseServiceHandler implements THBaseService.Iface {
int maxIdleTime = conf.getInt(MAX_IDLETIME, 10 * 60 * 1000);
connectionCache = new ConnectionCache(
conf, userProvider, cleanInterval, maxIdleTime);
- tableFactory = new HTableFactory() {
- @Override
- public HTableInterface createHTableInterface(Configuration config,
- byte[] tableName) {
- try {
- return connectionCache.getTable(Bytes.toString(tableName));
- } catch (IOException ioe) {
- throw new RuntimeException(ioe);
- }
- }
- };
htablePools = CacheBuilder.newBuilder().expireAfterAccess(
maxIdleTime, TimeUnit.MILLISECONDS).softValues().concurrencyLevel(4).build();
maxPoolSize = conf.getInt("hbase.thrift.htablepool.size.max", 1000);
 htablePoolCreater = new Callable<HTablePool>() { 
public HTablePool call() {
- return new HTablePool(conf, maxPoolSize, tableFactory);
+ return new HTablePool(conf, maxPoolSize);
}
};
}
diff --git hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestHTablePool.java hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestHTablePool.java
index 101a7cf..51568f6 100644
--- hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestHTablePool.java
+++ hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestHTablePool.java
@@ -23,7 +23,6 @@ import java.io.IOException;
import org.apache.hadoop.hbase.*;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
-import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.testclassification.ClientTests;
import org.apache.hadoop.hbase.testclassification.MediumTests;
@@ -150,7 +149,7 @@ public class TestHTablePool {
// empty pool
// get table will return proxy implementation
- HTableInterface table = pool.getTable(tableName);
+ Table table = pool.getTable(tableName);
// put back the proxy implementation instead of closing it
pool.putTable(table);
@@ -173,11 +172,7 @@ public class TestHTablePool {
// get table will return proxy implementation
final Table table = pool.getTable(tableName);
- HTableInterface alienTable = new HTable(TEST_UTIL.getConfiguration(),
- TableName.valueOf(TABLENAME)) {
- // implementation doesn't matter as long the table is not from
- // pool
- };
+ Table alienTable = TEST_UTIL.getConnection().getTable(TableName.valueOf(TABLENAME));
try {
// put the wrong table in pool
pool.putTable(alienTable);
 