From dbfd15c41db46f91713b3f26de94aed60dc7115e Mon Sep 17 00:00:00 2001
From: Ashish Singhi
Date: Fri, 26 Jun 2015 23:48:19 +0530
Subject: [PATCH] HBASE-13214 Remove deprecated and unused methods from HTable
class
---
.../org/apache/hadoop/hbase/MetaTableAccessor.java | 2 +-
.../hadoop/hbase/client/BufferedMutator.java | 2 +-
.../hadoop/hbase/client/BufferedMutatorImpl.java | 2 +-
.../org/apache/hadoop/hbase/client/HTable.java | 390 +--------------------
.../hadoop/hbase/client/HTableInterface.java | 47 +--
.../java/org/apache/hadoop/hbase/client/Table.java | 31 +-
.../hadoop/hbase/client/TestAsyncProcess.java | 2 +-
.../hadoop/hbase/rest/client/RemoteHTable.java | 11 -
.../apache/hadoop/hbase/client/HTableWrapper.java | 25 --
.../replication/regionserver/ReplicationSink.java | 2 +-
.../main/resources/hbase-webapps/master/table.jsp | 32 +-
.../hadoop/hbase/client/TestFromClientSide.java | 93 +++--
.../hbase/client/TestFromClientSideNoCodec.java | 5 +-
.../org/apache/hadoop/hbase/client/TestHCM.java | 6 +-
.../hadoop/hbase/client/TestMultiParallel.java | 35 +-
.../hbase/client/TestRpcControllerFactory.java | 2 +-
.../hadoop/hbase/client/TestSizeFailures.java | 4 +-
.../hbase/coprocessor/TestHTableWrapper.java | 39 ++-
.../coprocessor/TestOpenTableInCoprocessor.java | 2 +-
19 files changed, 154 insertions(+), 578 deletions(-)
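
Note on migrating callers: code that relied on the removed Boolean[] exists(List<Get>) overload can move to Table#existsAll(List<Get>), which the remaining API exposes. A minimal sketch, assuming an already opened Table instance; the helper name and row keys are illustrative only:

    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.List;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    // Hypothetical helper, not part of this patch.
    static boolean[] checkRows(Table table) throws IOException {
      List<Get> gets = new ArrayList<Get>();
      gets.add(new Get(Bytes.toBytes("row1")));  // illustrative row keys
      gets.add(new Get(Bytes.toBytes("row2")));
      // Before: Boolean[] found = table.exists(gets);  (removed overload)
      return table.existsAll(gets);  // primitive boolean[], one entry per Get
    }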
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
index ee42b93..2fbfd9f 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
@@ -1457,7 +1457,7 @@ public class MetaTableAccessor {
if (METALOG.isDebugEnabled()) {
METALOG.debug(mutationsToString(mutations));
}
- t.batch(mutations, new Object[mutations.size()]);
+ t.batch(mutations, null);
} catch (InterruptedException e) {
InterruptedIOException ie = new InterruptedIOException(e.getMessage());
ie.initCause(e);
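
The call sites updated in this hunk (and the similar one in ReplicationSink below) pass null for the results array when nothing reads the results back. A minimal sketch of the same pattern, assuming the two-argument batch tolerates a null results array as these call sites imply; the helper name, table, and cell coordinates are illustrative:

    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.List;
    import org.apache.hadoop.hbase.client.Mutation;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    // Hypothetical helper, not part of this patch.
    static void applyMutations(Table t) throws IOException, InterruptedException {
      List<Mutation> mutations = new ArrayList<Mutation>();
      mutations.add(new Put(Bytes.toBytes("row"))
          .addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v")));
      // No caller inspects the per-action results, so no array is allocated.
      t.batch(mutations, null);
    }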
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutator.java
index 4424cec..3287335 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutator.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutator.java
@@ -52,7 +52,7 @@ import java.util.List;
* extreme circumstances, such as JVM or machine failure, may cause some data loss.
*
* NOTE: This class replaces the functionality that used to be available via
- * {@link HTableInterface#setAutoFlush(boolean)} set to {@code false}.
+ * HTableInterface#setAutoFlush(boolean) set to {@code false}.
*
*
* See also the {@code BufferedMutatorExample} in the hbase-examples module.
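
For callers that used to buffer writes with setAutoFlush(false), the replacement is a BufferedMutator obtained from the Connection. A minimal sketch, assuming an illustrative table name and cell coordinates:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.BufferedMutator;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BufferedWriteSketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             BufferedMutator mutator = conn.getBufferedMutator(TableName.valueOf("t1"))) {
          // Mutations are buffered client side, as with the old setAutoFlush(false).
          mutator.mutate(new Put(Bytes.toBytes("row"))
              .addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v")));
          mutator.flush();  // push anything still sitting in the buffer
        }
      }
    }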
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorImpl.java
index ba86986..2ee00d9 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorImpl.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorImpl.java
@@ -247,7 +247,7 @@ public class BufferedMutatorImpl implements BufferedMutator {
}
/**
- * This is used for legacy purposes in {@link HTable#getWriteBuffer()} only. This should not be
+ * This is used for legacy purposes only. This should not be
* called from production uses.
* @deprecated Going away when we drop public support for {@link HTableInterface}.
*/
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
index 686aaa8..ebeaabd 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
@@ -24,7 +24,6 @@ import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
-import java.util.NavigableMap;
import java.util.TreeMap;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
@@ -38,14 +37,10 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValueUtil;
-import org.apache.hadoop.hbase.MetaTableAccessor;
-import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
@@ -241,123 +236,6 @@ public class HTable implements HTableInterface {
}
/**
- * Tells whether or not a table is enabled or not. This method creates a
- * new HBase configuration, so it might make your unit tests fail due to
- * incorrect ZK client port.
- * @param tableName Name of table to check.
- * @return {@code true} if table is online.
- * @throws IOException if a remote or network exception occurs
- * @deprecated use {@link HBaseAdmin#isTableEnabled(byte[])}
- */
- @Deprecated
- public static boolean isTableEnabled(String tableName) throws IOException {
- return isTableEnabled(TableName.valueOf(tableName));
- }
-
- /**
- * Tells whether or not a table is enabled or not. This method creates a
- * new HBase configuration, so it might make your unit tests fail due to
- * incorrect ZK client port.
- * @param tableName Name of table to check.
- * @return {@code true} if table is online.
- * @throws IOException if a remote or network exception occurs
- * @deprecated use {@link HBaseAdmin#isTableEnabled(byte[])}
- */
- @Deprecated
- public static boolean isTableEnabled(byte[] tableName) throws IOException {
- return isTableEnabled(TableName.valueOf(tableName));
- }
-
- /**
- * Tells whether or not a table is enabled or not. This method creates a
- * new HBase configuration, so it might make your unit tests fail due to
- * incorrect ZK client port.
- * @param tableName Name of table to check.
- * @return {@code true} if table is online.
- * @throws IOException if a remote or network exception occurs
- * @deprecated use {@link HBaseAdmin#isTableEnabled(byte[])}
- */
- @Deprecated
- public static boolean isTableEnabled(TableName tableName) throws IOException {
- return isTableEnabled(HBaseConfiguration.create(), tableName);
- }
-
- /**
- * Tells whether or not a table is enabled or not.
- * @param conf The Configuration object to use.
- * @param tableName Name of table to check.
- * @return {@code true} if table is online.
- * @throws IOException if a remote or network exception occurs
- * @deprecated use {@link HBaseAdmin#isTableEnabled(byte[])}
- */
- @Deprecated
- public static boolean isTableEnabled(Configuration conf, String tableName)
- throws IOException {
- return isTableEnabled(conf, TableName.valueOf(tableName));
- }
-
- /**
- * Tells whether or not a table is enabled or not.
- * @param conf The Configuration object to use.
- * @param tableName Name of table to check.
- * @return {@code true} if table is online.
- * @throws IOException if a remote or network exception occurs
- * @deprecated use {@link HBaseAdmin#isTableEnabled(byte[])}
- */
- @Deprecated
- public static boolean isTableEnabled(Configuration conf, byte[] tableName)
- throws IOException {
- return isTableEnabled(conf, TableName.valueOf(tableName));
- }
-
- /**
- * Tells whether or not a table is enabled or not.
- * @param conf The Configuration object to use.
- * @param tableName Name of table to check.
- * @return {@code true} if table is online.
- * @throws IOException if a remote or network exception occurs
- * @deprecated use {@link HBaseAdmin#isTableEnabled(org.apache.hadoop.hbase.TableName tableName)}
- */
- @Deprecated
- public static boolean isTableEnabled(Configuration conf,
- final TableName tableName) throws IOException {
- try(Connection conn = ConnectionFactory.createConnection(conf)) {
- return conn.getAdmin().isTableEnabled(tableName);
- }
- }
-
- /**
- * Find region location hosting passed row using cached info
- * @param row Row to find.
- * @return The location of the given row.
- * @throws IOException if a remote or network exception occurs
- * @deprecated Use {@link RegionLocator#getRegionLocation(byte[])}
- */
- @Deprecated
- public HRegionLocation getRegionLocation(final String row)
- throws IOException {
- return getRegionLocation(Bytes.toBytes(row), false);
- }
-
- /**
- * @deprecated Use {@link RegionLocator#getRegionLocation(byte[])} instead.
- */
- @Deprecated
- public HRegionLocation getRegionLocation(final byte [] row)
- throws IOException {
- return locator.getRegionLocation(row);
- }
-
- /**
- * @deprecated Use {@link RegionLocator#getRegionLocation(byte[], boolean)} instead.
- */
- @Deprecated
- public HRegionLocation getRegionLocation(final byte [] row, boolean reload)
- throws IOException {
- return locator.getRegionLocation(row, reload);
- }
-
- /**
* {@inheritDoc}
*/
@Override
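
The removed static isTableEnabled helpers pointed callers at the Admin API. A minimal sketch of the replacement, matching what the removed method body did; the helper name is hypothetical:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    // Hypothetical helper, not part of this patch.
    static boolean tableEnabled(Configuration conf, String name) throws IOException {
      try (Connection conn = ConnectionFactory.createConnection(conf);
           Admin admin = conn.getAdmin()) {
        return admin.isTableEnabled(TableName.valueOf(name));
      }
    }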
@@ -384,15 +262,6 @@ public class HTable implements HTableInterface {
}
/**
- * Kept in 0.96 for backward compatibility
- * @deprecated since 0.96. This is an internal buffer that should not be read nor write.
- */
- @Deprecated
- public List getWriteBuffer() {
- return mutator == null ? null : mutator.getWriteBuffer();
- }
-
- /**
* {@inheritDoc}
*/
@Override
@@ -457,69 +326,6 @@ public class HTable implements HTableInterface {
}
/**
- * Gets all the regions and their address for this table.
- *
- * This is mainly useful for the MapReduce integration.
- * @return A map of HRegionInfo with it's server address
- * @throws IOException if a remote or network exception occurs
- * @deprecated This is no longer a public API. Use {@link #getAllRegionLocations()} instead.
- */
- @SuppressWarnings("deprecation")
- @Deprecated
- public NavigableMap<HRegionInfo, ServerName> getRegionLocations() throws IOException {
- // TODO: Odd that this returns a Map of HRI to SN whereas getRegionLocator, singular,
- // returns an HRegionLocation.
- return MetaTableAccessor.allTableRegions(this.connection, getName());
- }
-
- /**
- * Gets all the regions and their address for this table.
- *
- * This is mainly useful for the MapReduce integration.
- * @return A map of HRegionInfo with it's server address
- * @throws IOException if a remote or network exception occurs
- *
- * @deprecated Use {@link RegionLocator#getAllRegionLocations()} instead;
- */
- @Deprecated
- public List<HRegionLocation> getAllRegionLocations() throws IOException {
- return locator.getAllRegionLocations();
- }
-
- /**
- * Get the corresponding regions for an arbitrary range of keys.
- *
- * @param startKey Starting row in range, inclusive
- * @param endKey Ending row in range, exclusive
- * @return A list of HRegionLocations corresponding to the regions that
- * contain the specified range
- * @throws IOException if a remote or network exception occurs
- * @deprecated This is no longer a public API
- */
- @Deprecated
- public List<HRegionLocation> getRegionsInRange(final byte [] startKey,
- final byte [] endKey) throws IOException {
- return getRegionsInRange(startKey, endKey, false);
- }
-
- /**
- * Get the corresponding regions for an arbitrary range of keys.
- *
- * @param startKey Starting row in range, inclusive
- * @param endKey Ending row in range, exclusive
- * @param reload true to reload information or false to use cached information
- * @return A list of HRegionLocations corresponding to the regions that
- * contain the specified range
- * @throws IOException if a remote or network exception occurs
- * @deprecated This is no longer a public API
- */
- @Deprecated
- public List<HRegionLocation> getRegionsInRange(final byte [] startKey,
- final byte [] endKey, final boolean reload) throws IOException {
- return getKeysAndRegionsInRange(startKey, endKey, false, reload).getSecond();
- }
-
- /**
* Get the corresponding start keys and regions for an arbitrary range of
* keys.
*
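
Region lookups that used the removed getRegionLocations()/getRegionsInRange()/getRegionLocation(String) methods move to RegionLocator. A minimal sketch, assuming an open Connection; the helper name and probe row are illustrative:

    import java.io.IOException;
    import java.util.List;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.RegionLocator;
    import org.apache.hadoop.hbase.util.Bytes;

    // Hypothetical helper, not part of this patch.
    static void printRegions(Connection conn, TableName tn) throws IOException {
      try (RegionLocator locator = conn.getRegionLocator(tn)) {
        // Replaces HTable#getRegionLocations() / getAllRegionLocations().
        List<HRegionLocation> locations = locator.getAllRegionLocations();
        for (HRegionLocation loc : locations) {
          System.out.println(loc.getRegionInfo().getRegionNameAsString()
              + " -> " + loc.getServerName());
        }
        // Replaces HTable#getRegionLocation(byte[], boolean).
        HRegionLocation one = locator.getRegionLocation(Bytes.toBytes("probe-row"), false);
        System.out.println("probe-row lives in " + one.getRegionInfo().getEncodedName());
      }
    }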
@@ -529,9 +335,7 @@ public class HTable implements HTableInterface {
* @return A pair of list of start keys and list of HRegionLocations that
* contain the specified range
* @throws IOException if a remote or network exception occurs
- * @deprecated This is no longer a public API
*/
- @Deprecated
private Pair<List<byte[]>, List<HRegionLocation>> getKeysAndRegionsInRange(
final byte[] startKey, final byte[] endKey, final boolean includeEndKey)
throws IOException {
@@ -549,9 +353,7 @@ public class HTable implements HTableInterface {
* @return A pair of list of start keys and list of HRegionLocations that
* contain the specified range
* @throws IOException if a remote or network exception occurs
- * @deprecated This is no longer a public API
*/
- @Deprecated
private Pair<List<byte[]>, List<HRegionLocation>> getKeysAndRegionsInRange(
final byte[] startKey, final byte[] endKey, final boolean includeEndKey,
final boolean reload) throws IOException {
@@ -565,7 +367,7 @@ public class HTable implements HTableInterface {
List<HRegionLocation> regionsInRange = new ArrayList<HRegionLocation>();
byte[] currentKey = startKey;
do {
- HRegionLocation regionLocation = getRegionLocation(currentKey, reload);
+ HRegionLocation regionLocation = getRegionLocator().getRegionLocation(currentKey, reload);
keysInRange.add(currentKey);
regionsInRange.add(regionLocation);
currentKey = regionLocation.getRegionInfo().getEndKey();
@@ -577,35 +379,6 @@ public class HTable implements HTableInterface {
}
/**
- * {@inheritDoc}
- * @deprecated Use reversed scan instead.
- */
- @Override
- @Deprecated
- public Result getRowOrBefore(final byte[] row, final byte[] family)
- throws IOException {
- RegionServerCallable<Result> callable = new RegionServerCallable<Result>(this.connection,
- tableName, row) {
- @Override
- public Result call(int callTimeout) throws IOException {
- PayloadCarryingRpcController controller = rpcControllerFactory.newController();
- controller.setPriority(tableName);
- controller.setCallTimeout(callTimeout);
- ClientProtos.GetRequest request = RequestConverter.buildGetRowOrBeforeRequest(
- getLocation().getRegionInfo().getRegionName(), row, family);
- try {
- ClientProtos.GetResponse response = getStub().get(controller, request);
- if (!response.hasResult()) return null;
- return ProtobufUtil.toResult(response.getResult());
- } catch (ServiceException se) {
- throw ProtobufUtil.getRemoteException(se);
- }
- }
- };
- return rpcCallerFactory.newCaller().callWithRetries(callable, this.operationTimeout);
- }
-
- /**
* The underlying {@link HTable} must not be closed.
* {@link HTableInterface#getScanner(Scan)} has other usage details.
*/
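
The deprecation note for getRowOrBefore points at reversed scans. A minimal sketch of that replacement, assuming an open Table; start row is inclusive, the scan walks towards smaller keys, and the first result wins. The helper name is hypothetical:

    import java.io.IOException;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.ResultScanner;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.client.Table;

    // Hypothetical helper, not part of this patch.
    static Result rowOrBefore(Table table, byte[] row, byte[] family) throws IOException {
      Scan scan = new Scan(row);   // start at the probe row...
      scan.setReversed(true);      // ...and scan towards smaller keys
      scan.addFamily(family);
      scan.setCaching(1);
      try (ResultScanner scanner = table.getScanner(scan)) {
        return scanner.next();     // the probe row if present, else the closest preceding one
      }
    }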
@@ -740,7 +513,8 @@ public class HTable implements HTableInterface {
return new Result[]{get(gets.get(0))};
}
try {
- Object [] r1 = batch((List)gets);
+ Object[] r1 = new Object[gets.size()];
+ batch((List) gets, r1);
// translate.
Result [] results = new Result[r1.length];
@@ -771,20 +545,6 @@ public class HTable implements HTableInterface {
/**
* {@inheritDoc}
- * @deprecated If any exception is thrown by one of the actions, there is no way to
- * retrieve the partially executed results. Use {@link #batch(List, Object[])} instead.
- */
- @Deprecated
- @Override
- public Object[] batch(final List<? extends Row> actions)
- throws InterruptedException, IOException {
- Object[] results = new Object[actions.size()];
- batch(actions, results);
- return results;
- }
-
- /**
- * {@inheritDoc}
*/
@Override
public void batchCallback(
@@ -795,23 +555,6 @@ public class HTable implements HTableInterface {
/**
* {@inheritDoc}
- * @deprecated If any exception is thrown by one of the actions, there is no way to
- * retrieve the partially executed results. Use
- * {@link #batchCallback(List, Object[], Batch.Callback)}
- * instead.
- */
- @Deprecated
- @Override
- public <R> Object[] batchCallback(
- final List<? extends Row> actions, final Batch.Callback<R> callback) throws IOException,
- InterruptedException {
- Object[] results = new Object[actions.size()];
- batchCallback(actions, results, callback);
- return results;
- }
-
- /**
- * {@inheritDoc}
*/
@Override
public void delete(final Delete delete)
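
With the array-less batch and batchCallback overloads gone, callers allocate the results array themselves, which keeps partially executed results reachable when an action throws. A minimal sketch, assuming an open Table; the helper name and row keys are illustrative:

    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.List;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.Row;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    // Hypothetical helper, not part of this patch.
    static Object[] batchGets(Table table) throws IOException, InterruptedException {
      List<Row> actions = new ArrayList<Row>();
      actions.add(new Get(Bytes.toBytes("row1")));
      actions.add(new Get(Bytes.toBytes("row2")));
      Object[] results = new Object[actions.size()];
      // Before: Object[] results = table.batch(actions);  (removed overload)
      table.batch(actions, results);
      return results;
    }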
@@ -1221,9 +964,9 @@ public class HTable implements HTableInterface {
exists.add(ge);
}
- Object[] r1;
+ Object[] r1 = new Object[exists.size()];
try {
- r1 = batch(exists);
+ batch(exists, r1);
} catch (InterruptedException e) {
throw (InterruptedIOException)new InterruptedIOException().initCause(e);
}
@@ -1241,21 +984,6 @@ public class HTable implements HTableInterface {
/**
* {@inheritDoc}
- * @deprecated Use {@link #existsAll(java.util.List)} instead.
- */
- @Override
- @Deprecated
- public Boolean[] exists(final List<Get> gets) throws IOException {
- boolean[] results = existsAll(gets);
- Boolean[] objectResults = new Boolean[results.length];
- for (int i = 0; i < results.length; ++i) {
- objectResults[i] = results[i];
- }
- return objectResults;
- }
-
- /**
- * {@inheritDoc}
* @throws IOException
*/
@Override
@@ -1353,19 +1081,6 @@ public class HTable implements HTableInterface {
/**
* {@inheritDoc}
- * @deprecated in 0.96. When called with setAutoFlush(false), this function also
- * set clearBufferOnFail to true, which is unexpected but kept for historical reasons.
- * Replace it with setAutoFlush(false, false) if this is exactly what you want, or by
- * {@link #setAutoFlushTo(boolean)} for all other cases.
- */
- @Deprecated
- @Override
- public void setAutoFlush(boolean autoFlush) {
- this.autoFlush = autoFlush;
- }
-
- /**
- * {@inheritDoc}
*/
@Override
public void setAutoFlushTo(boolean autoFlush) {
@@ -1419,101 +1134,6 @@ public class HTable implements HTableInterface {
}
/**
- * Enable or disable region cache prefetch for the table. It will be
- * applied for the given table's all HTable instances who share the same
- * connection. By default, the cache prefetch is enabled.
- * @param tableName name of table to configure.
- * @param enable Set to true to enable region cache prefetch. Or set to
- * false to disable it.
- * @throws IOException
- * @deprecated does nothing since 0.99
- */
- @Deprecated
- public static void setRegionCachePrefetch(final byte[] tableName,
- final boolean enable) throws IOException {
- }
-
- /**
- * @deprecated does nothing since 0.99
- */
- @Deprecated
- public static void setRegionCachePrefetch(
- final TableName tableName,
- final boolean enable) throws IOException {
- }
-
- /**
- * Enable or disable region cache prefetch for the table. It will be
- * applied for the given table's all HTable instances who share the same
- * connection. By default, the cache prefetch is enabled.
- * @param conf The Configuration object to use.
- * @param tableName name of table to configure.
- * @param enable Set to true to enable region cache prefetch. Or set to
- * false to disable it.
- * @throws IOException
- * @deprecated does nothing since 0.99
- */
- @Deprecated
- public static void setRegionCachePrefetch(final Configuration conf,
- final byte[] tableName, final boolean enable) throws IOException {
- }
-
- /**
- * @deprecated does nothing since 0.99
- */
- @Deprecated
- public static void setRegionCachePrefetch(final Configuration conf,
- final TableName tableName,
- final boolean enable) throws IOException {
- }
-
- /**
- * Check whether region cache prefetch is enabled or not for the table.
- * @param conf The Configuration object to use.
- * @param tableName name of table to check
- * @return true if table's region cache prefecth is enabled. Otherwise
- * it is disabled.
- * @throws IOException
- * @deprecated always return false since 0.99
- */
- @Deprecated
- public static boolean getRegionCachePrefetch(final Configuration conf,
- final byte[] tableName) throws IOException {
- return false;
- }
-
- /**
- * @deprecated always return false since 0.99
- */
- @Deprecated
- public static boolean getRegionCachePrefetch(final Configuration conf,
- final TableName tableName) throws IOException {
- return false;
- }
-
- /**
- * Check whether region cache prefetch is enabled or not for the table.
- * @param tableName name of table to check
- * @return true if table's region cache prefecth is enabled. Otherwise
- * it is disabled.
- * @throws IOException
- * @deprecated always return false since 0.99
- */
- @Deprecated
- public static boolean getRegionCachePrefetch(final byte[] tableName) throws IOException {
- return false;
- }
-
- /**
- * @deprecated always return false since 0.99
- */
- @Deprecated
- public static boolean getRegionCachePrefetch(
- final TableName tableName) throws IOException {
- return false;
- }
-
- /**
* Explicitly clears the region cache to fetch the latest value from META.
* This is a power user function: avoid unless you know the ramifications.
*/
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableInterface.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableInterface.java
index 745c770..4cd81e7 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableInterface.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableInterface.java
@@ -19,7 +19,6 @@
package org.apache.hadoop.hbase.client;
import java.io.IOException;
-import java.util.List;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;
@@ -46,26 +45,6 @@ public interface HTableInterface extends Table {
byte[] getTableName();
/**
- * @deprecated Use {@link #existsAll(java.util.List)} instead.
- */
- @Deprecated
- Boolean[] exists(List<Get> gets) throws IOException;
-
-
- /**
- * See {@link #setAutoFlush(boolean, boolean)}
- *
- * @param autoFlush
- * Whether or not to enable 'auto-flush'.
- * @deprecated in 0.96. When called with setAutoFlush(false), this function also
- * set clearBufferOnFail to true, which is unexpected but kept for historical reasons.
- * Replace it with setAutoFlush(false, false) if this is exactly what you want, though
- * this is the method you want for most cases.
- */
- @Deprecated
- void setAutoFlush(boolean autoFlush);
-
- /**
* Turns 'auto-flush' on or off.
*
* When enabled (default), {@link Put} operations don't get buffered/delayed
@@ -96,8 +75,7 @@ public interface HTableInterface extends Table {
* Whether to keep Put failures in the writeBuffer. If autoFlush is true, then
* the value of this parameter is ignored and clearBufferOnFail is set to true.
* Setting clearBufferOnFail to false is deprecated since 0.96.
- * @deprecated in 0.99 since setting clearBufferOnFail is deprecated. Use
- * {@link #setAutoFlush(boolean)}} instead.
+ * @deprecated in 0.99 since setting clearBufferOnFail is deprecated.
* @see BufferedMutator#flush()
*/
@Deprecated
@@ -105,8 +83,8 @@ public interface HTableInterface extends Table {
/**
* Set the autoFlush behavior, without changing the value of {@code clearBufferOnFail}.
- * @deprecated in 0.99 since setting clearBufferOnFail is deprecated. Use
- * {@link #setAutoFlush(boolean)} instead, or better still, move on to {@link BufferedMutator}
+ * @deprecated in 0.99 since setting clearBufferOnFail is deprecated. Move on to
+ * {@link BufferedMutator}
*/
@Deprecated
void setAutoFlushTo(boolean autoFlush);
@@ -157,23 +135,4 @@ public interface HTableInterface extends Table {
*/
@Deprecated
void setWriteBufferSize(long writeBufferSize) throws IOException;
-
-
- /**
- * Return the row that matches row exactly,
- * or the one that immediately precedes it.
- *
- * @param row A row key.
- * @param family Column family to include in the {@link Result}.
- * @throws IOException if a remote or network exception occurs.
- * @since 0.20.0
- *
- * @deprecated As of version 0.92 this method is deprecated without
- * replacement. Since version 0.96+, you can use reversed scan.
- * getRowOrBefore is used internally to find entries in hbase:meta and makes
- * various assumptions about the table (which are true for hbase:meta but not
- * in general) to be efficient.
- */
- @Deprecated
- Result getRowOrBefore(byte[] row, byte[] family) throws IOException;
}
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Table.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Table.java
index 498c587..3e9db00 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Table.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Table.java
@@ -120,21 +120,6 @@ public interface Table extends Closeable {
InterruptedException;
/**
- * Same as {@link #batch(List, Object[])}, but returns an array of
- * results instead of using a results parameter reference.
- *
- * @param actions list of Get, Put, Delete, Increment, Append objects
- * @return the results from the actions. A null in the return array means that
- * the call for that action failed, even after retries
- * @throws IOException
- * @since 0.90.0
- * @deprecated If any exception is thrown by one of the actions, there is no way to
- * retrieve the partially executed results. Use {@link #batch(List, Object[])} instead.
- */
- @Deprecated
- Object[] batch(final List<? extends Row> actions) throws IOException, InterruptedException;
-
- /**
* Same as {@link #batch(List, Object[])}, but with a callback.
* @since 0.96.0
*/
@@ -144,20 +129,6 @@ public interface Table extends Closeable {
throws IOException, InterruptedException;
/**
- * Same as {@link #batch(List)}, but with a callback.
- *
- * @since 0.96.0
- * @deprecated If any exception is thrown by one of the actions, there is no way to retrieve the
- * partially executed results. Use {@link #batchCallback(List, Object[],
- * org.apache.hadoop.hbase.client.coprocessor.Batch.Callback)} instead.
- */
- @Deprecated
- <R> Object[] batchCallback(
- List<? extends Row> actions, Batch.Callback<R> callback
- ) throws IOException,
- InterruptedException;
-
- /**
* Extracts certain cells from a given row.
* @param get The object that specifies what data to fetch and from which row.
* @return The data coming from the specified row, if it exists. If the row
@@ -219,7 +190,7 @@ public interface Table extends Closeable {
/**
* Puts some data in the table.
- *
+ *
* @param put The data to put.
* @throws IOException if a remote or network exception occurs.
* @since 0.20.0
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java
index 1460de4..8340f97 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java
@@ -867,7 +867,7 @@ public class TestAsyncProcess {
MyAsyncProcess ap = new MyAsyncProcess(con, conf, con.nbThreads);
ht.multiAp = ap;
- ht.batch(gets, new Object[gets.size()]);
+ ht.batch(gets, null);
Assert.assertEquals(ap.nbActions.get(), NB_REGS);
Assert.assertEquals("1 multi response per server", 2, ap.nbMultiResponse.get());
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java
index 2d122df..a12c747 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java
@@ -792,23 +792,12 @@ public class RemoteHTable implements Table {
}
@Override
- public Object[] batch(List<? extends Row> actions) throws IOException {
- throw new IOException("batch not supported");
- }
-
- @Override
public <R> void batchCallback(List<? extends Row> actions, Object[] results,
Batch.Callback<R> callback) throws IOException, InterruptedException {
throw new IOException("batchCallback not supported");
}
@Override
- public <R> Object[] batchCallback(List<? extends Row> actions, Batch.Callback<R> callback)
- throws IOException, InterruptedException {
- throw new IOException("batchCallback not supported");
- }
-
- @Override
public CoprocessorRpcChannel coprocessorService(byte[] row) {
throw new UnsupportedOperationException("coprocessorService not implemented");
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HTableWrapper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HTableWrapper.java
index e3641c7..6713546 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HTableWrapper.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HTableWrapper.java
@@ -242,37 +242,12 @@ public final class HTableWrapper implements Table {
table.batch(actions, results);
}
- /**
- * {@inheritDoc}
- * @deprecated If any exception is thrown by one of the actions, there is no way to
- * retrieve the partially executed results. Use {@link #batch(List, Object[])} instead.
- */
- @Deprecated
- @Override
- public Object[] batch(List<? extends Row> actions)
- throws IOException, InterruptedException {
- return table.batch(actions);
- }
-
@Override
public <R> void batchCallback(List<? extends Row> actions, Object[] results,
Batch.Callback<R> callback) throws IOException, InterruptedException {
table.batchCallback(actions, results, callback);
}
- /**
- * {@inheritDoc}
- * @deprecated If any exception is thrown by one of the actions, there is no way to
- * retrieve the partially executed results. Use
- * {@link #batchCallback(List, Object[], Batch.Callback)} instead.
- */
- @Deprecated
- @Override
- public <R> Object[] batchCallback(List<? extends Row> actions,
- Batch.Callback<R> callback) throws IOException, InterruptedException {
- return table.batchCallback(actions, callback);
- }
-
@Override
public Result[] get(List<Get> gets) throws IOException {
return table.get(gets);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java
index 7d47677..fc7aa8e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java
@@ -234,7 +234,7 @@ public class ReplicationSink {
try {
table = this.sharedHtableCon.getTable(tableName);
for (List<Row> rows : allRows) {
- table.batch(rows);
+ table.batch(rows, null);
}
} catch (InterruptedException ix) {
throw (InterruptedIOException)new InterruptedIOException().initCause(ix);
diff --git a/hbase-server/src/main/resources/hbase-webapps/master/table.jsp b/hbase-server/src/main/resources/hbase-webapps/master/table.jsp
index f057a44..45be52b 100644
--- a/hbase-server/src/main/resources/hbase-webapps/master/table.jsp
+++ b/hbase-server/src/main/resources/hbase-webapps/master/table.jsp
@@ -20,18 +20,21 @@
<%@ page contentType="text/html;charset=UTF-8"
import="static org.apache.commons.lang.StringEscapeUtils.escapeXml"
import="java.util.TreeMap"
+ import="java.util.List"
import="java.util.Map"
import="java.util.Set"
import="java.util.Collection"
import="org.apache.hadoop.conf.Configuration"
import="org.apache.hadoop.hbase.client.HTable"
import="org.apache.hadoop.hbase.client.Admin"
+ import="org.apache.hadoop.hbase.client.RegionLocator"
import="org.apache.hadoop.hbase.HRegionInfo"
+ import="org.apache.hadoop.hbase.HRegionLocation"
import="org.apache.hadoop.hbase.ServerName"
import="org.apache.hadoop.hbase.ServerLoad"
import="org.apache.hadoop.hbase.RegionLoad"
import="org.apache.hadoop.hbase.HConstants"
- import="org.apache.hadoop.hbase.master.HMaster"
+ import="org.apache.hadoop.hbase.master.HMaster"
import="org.apache.hadoop.hbase.zookeeper.MetaTableLocator"
import="org.apache.hadoop.hbase.util.Bytes"
import="org.apache.hadoop.hbase.util.FSUtils"
@@ -119,8 +122,8 @@
-<%
-if ( fqtn != null ) {
+<%
+if ( fqtn != null ) {
table = (HTable) master.getConnection().getTable(fqtn);
if (table.getTableDescriptor().getRegionReplication() > 1) {
tableHeader = "Table Regions
| Name | Region Server | Start Key | End Key | Locality | Requests | ReplicaID |
";
@@ -128,7 +131,7 @@ if ( fqtn != null ) {
} else {
tableHeader = "Table Regions
| Name | Region Server | Start Key | End Key | Locality | Requests |
";
}
- if ( !readOnly && action != null ) {
+ if ( !readOnly && action != null ) {
%>
@@ -147,7 +150,7 @@ if ( fqtn != null ) {
} else {
admin.split(TableName.valueOf(fqtn));
}
-
+
%> Split request accepted. <%
} else if (action.equals("compact")) {
if (key != null && key.length() > 0) {
@@ -201,6 +204,7 @@ if ( fqtn != null ) {
<%} else {
Admin admin = master.getConnection().getAdmin();
+ RegionLocator r = master.getConnection().getRegionLocator(table.getName());
try { %>
Table Attributes
@@ -256,7 +260,7 @@ if ( fqtn != null ) {
| Property |
- Value |
+ Value |
<%
Map familyValues = family.getValues();
@@ -278,13 +282,13 @@ if ( fqtn != null ) {
<%
Map<ServerName, Integer> regDistribution = new TreeMap<ServerName, Integer>();
- Map<HRegionInfo, ServerName> regions = table.getRegionLocations();
+ List<HRegionLocation> regions = r.getAllRegionLocations();
if(regions != null && regions.size() > 0) { %>
<%= tableHeader %>
<%
- for (Map.Entry<HRegionInfo, ServerName> hriEntry : regions.entrySet()) {
- HRegionInfo regionInfo = hriEntry.getKey();
- ServerName addr = hriEntry.getValue();
+ for (HRegionLocation hriEntry : regions) {
+ HRegionInfo regionInfo = hriEntry.getRegionInfo();
+ ServerName addr = hriEntry.getServerName();
long req = 0;
float locality = 0.0f;
String urlRegionServer = null;
@@ -339,8 +343,8 @@ if ( fqtn != null ) {
Regions by Region Server
| Region Server | Region Count |
<%
- for (Map.Entry rdEntry : regDistribution.entrySet()) {
- ServerName addr = rdEntry.getKey();
+ for (Map.Entry rdEntry : regDistribution.entrySet()) {
+ ServerName addr = rdEntry.getKey();
String url = "//" + addr.getHostname() + ":" + master.getRegionServerInfoPort(addr) + "/";
%>
@@ -399,8 +403,8 @@ Actions:
<% } %>
-<% }
-} else { // handle the case for fqtn is null with error message + redirect
+<% }
+} else { // handle the case for fqtn is null with error message + redirect
%>