Index: hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
===================================================================
--- hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java (revision 1552255)
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java (working copy)
@@ -446,35 +446,19 @@
});
}
- /**
- * Find region location hosting passed row using cached info
- * @param row Row to find.
- * @return The location of the given row.
- * @throws IOException if a remote or network exception occurs
- */
+ @Override
public HRegionLocation getRegionLocation(final String row)
throws IOException {
return connection.getRegionLocation(tableName, Bytes.toBytes(row), false);
}
- /**
- * Finds the region on which the given row is being served. Does not reload the cache.
- * @param row Row to find.
- * @return Location of the row.
- * @throws IOException if a remote or network exception occurs
- */
+ @Override
public HRegionLocation getRegionLocation(final byte [] row)
throws IOException {
return connection.getRegionLocation(tableName, row, false);
}
- /**
- * Finds the region on which the given row is being served.
- * @param row Row to find.
- * @param reload true to reload information or false to use cached information
- * @return Location of the row.
- * @throws IOException if a remote or network exception occurs
- */
+ @Override
public HRegionLocation getRegionLocation(final byte [] row, boolean reload)
throws IOException {
return connection.getRegionLocation(tableName, row, reload);
@@ -550,36 +534,17 @@
this.connection.getHTableDescriptor(this.tableName));
}
- /**
- * Gets the starting row key for every region in the currently open table.
- *
- * This is mainly useful for the MapReduce integration.
- * @return Array of region starting row keys
- * @throws IOException if a remote or network exception occurs
- */
+ @Override
public byte [][] getStartKeys() throws IOException {
return getStartEndKeys().getFirst();
}
- /**
- * Gets the ending row key for every region in the currently open table.
- *
- * This is mainly useful for the MapReduce integration.
- * @return Array of region ending row keys
- * @throws IOException if a remote or network exception occurs
- */
+ @Override
public byte[][] getEndKeys() throws IOException {
return getStartEndKeys().getSecond();
}
- /**
- * Gets the starting and ending row keys for every region in the currently
- * open table.
- *
- * This is mainly useful for the MapReduce integration.
- * @return Pair of arrays of region starting and ending row keys
- * @throws IOException if a remote or network exception occurs
- */
+ @Override
+ public Pair<byte[][], byte[][]> getStartEndKeys() throws IOException {
NavigableMap<HRegionInfo, ServerName> regions = getRegionLocations();
final List<byte[]> startKeyList = new ArrayList<byte[]>(regions.size());
@@ -595,6 +560,18 @@
endKeyList.toArray(new byte[endKeyList.size()][]));
}
+ @Override
+ public byte[][] getSplitKeys() throws IOException {
+ List<byte[]> splitKeysList = new ArrayList<byte[]>();
+ for (HRegionInfo regionInfo : getRegionLocations().keySet()) {
+ if (Bytes.equals(regionInfo.getStartKey(), HConstants.EMPTY_BYTE_ARRAY)) {
+ continue;
+ }
+ splitKeysList.add(regionInfo.getStartKey());
+ }
+ return splitKeysList.toArray(new byte[splitKeysList.size()][]);
+ }
+
/**
* Gets all the regions and their address for this table.
*
Index: hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableInterface.java
===================================================================
--- hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableInterface.java (revision 1552255)
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableInterface.java (working copy)
@@ -24,11 +24,13 @@
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.client.coprocessor.Batch;
import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
+import org.apache.hadoop.hbase.util.Pair;
import java.io.Closeable;
import java.io.IOException;
@@ -585,4 +587,63 @@
* @throws IOException if a remote or network exception occurs.
*/
void setWriteBufferSize(long writeBufferSize) throws IOException;
+
+ /**
+ * Gets the split keys of the currently open table.
+ * @return Array of table split keys
+ * @throws IOException if a remote or network exception occurs
+ */
+ public byte[][] getSplitKeys() throws IOException;
+
+ /**
+ * Gets the starting row key for every region in the currently open table.
+ *
+ * This is mainly useful for the MapReduce integration.
+ * @return Array of region starting row keys
+ * @throws IOException if a remote or network exception occurs
+ */
+ public byte[][] getStartKeys() throws IOException;
+
+ /**
+ * Gets the ending row key for every region in the currently open table.
+ *
+ * This is mainly useful for the MapReduce integration.
+ * @return Array of region ending row keys
+ * @throws IOException if a remote or network exception occurs
+ */
+ public byte[][] getEndKeys() throws IOException;
+
+ /**
+ * Gets the starting and ending row keys for every region in the currently open table.
+ *
+ * This is mainly useful for the MapReduce integration.
+ * @return Pair of arrays of region starting and ending row keys
+ * @throws IOException if a remote or network exception occurs
+ */
+ public Pair<byte[][], byte[][]> getStartEndKeys() throws IOException;
+
+ /**
+ * Find region location hosting passed row using cached info
+ * @param row Row to find.
+ * @return The location of the given row.
+ * @throws IOException if a remote or network exception occurs
+ */
+ public HRegionLocation getRegionLocation(final String row) throws IOException;
+
+ /**
+ * Finds the region on which the given row is being served. Does not reload the cache.
+ * @param row Row to find.
+ * @return Location of the row.
+ * @throws IOException if a remote or network exception occurs
+ */
+ public HRegionLocation getRegionLocation(final byte[] row) throws IOException;
+
+ /**
+ * Finds the region on which the given row is being served.
+ * @param row Row to find.
+ * @param reload true to reload information or false to use cached information
+ * @return Location of the row.
+ * @throws IOException if a remote or network exception occurs
+ */
+ public HRegionLocation getRegionLocation(final byte[] row, boolean reload) throws IOException;
}
Index: hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTablePool.java
===================================================================
--- hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTablePool.java (revision 1552255)
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTablePool.java (working copy)
@@ -27,12 +27,14 @@
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.coprocessor.Batch;
import org.apache.hadoop.hbase.client.coprocessor.Batch.Callback;
import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.util.PoolMap;
import org.apache.hadoop.hbase.util.PoolMap.PoolType;
@@ -622,5 +624,40 @@
byte[] qualifier, long amount, boolean writeToWAL) throws IOException {
return table.incrementColumnValue(row, family, qualifier, amount, writeToWAL);
}
+
+ @Override
+ public byte[][] getSplitKeys() throws IOException {
+ return table.getSplitKeys();
+ }
+
+ @Override
+ public byte[][] getStartKeys() throws IOException {
+ return table.getStartKeys();
+ }
+
+ @Override
+ public byte[][] getEndKeys() throws IOException {
+ return table.getEndKeys();
+ }
+
+ @Override
+ public Pair<byte[][], byte[][]> getStartEndKeys() throws IOException {
+ return table.getStartEndKeys();
+ }
+
+ @Override
+ public HRegionLocation getRegionLocation(String row) throws IOException {
+ return table.getRegionLocation(row);
+ }
+
+ @Override
+ public HRegionLocation getRegionLocation(byte[] row) throws IOException {
+ return table.getRegionLocation(row);
+ }
+
+ @Override
+ public HRegionLocation getRegionLocation(byte[] row, boolean reload) throws IOException {
+ return table.getRegionLocation(row, reload);
+ }
}
}
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java (revision 1552255)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java (working copy)
@@ -43,6 +43,7 @@
import org.apache.hadoop.hbase.CoprocessorEnvironment;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
+import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.TableName;
@@ -65,6 +66,7 @@
import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.CoprocessorClassLoader;
+import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.util.SortedCopyOnWriteSet;
import org.apache.hadoop.hbase.util.VersionInfo;
import org.apache.hadoop.io.MultipleIOException;
@@ -612,6 +614,41 @@
byte[] qualifier, long amount, boolean writeToWAL) throws IOException {
return table.incrementColumnValue(row, family, qualifier, amount, writeToWAL);
}
+
+ @Override
+ public byte[][] getSplitKeys() throws IOException {
+ return table.getSplitKeys();
+ }
+
+ @Override
+ public byte[][] getStartKeys() throws IOException {
+ return table.getStartKeys();
+ }
+
+ @Override
+ public byte[][] getEndKeys() throws IOException {
+ return table.getEndKeys();
+ }
+
+ @Override
+ public Pair<byte[][], byte[][]> getStartEndKeys() throws IOException {
+ return table.getStartEndKeys();
+ }
+
+ @Override
+ public HRegionLocation getRegionLocation(String row) throws IOException {
+ return table.getRegionLocation(row);
+ }
+
+ @Override
+ public HRegionLocation getRegionLocation(byte[] row) throws IOException {
+ return table.getRegionLocation(row);
+ }
+
+ @Override
+ public HRegionLocation getRegionLocation(byte[] row, boolean reload) throws IOException {
+ return table.getRegionLocation(row, reload);
+ }
}
/** The coprocessor */
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java (revision 1552255)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java (working copy)
@@ -36,6 +36,7 @@
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValueUtil;
@@ -62,6 +63,7 @@
import org.apache.hadoop.hbase.rest.model.ScannerModel;
import org.apache.hadoop.hbase.rest.model.TableSchemaModel;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.util.StringUtils;
import com.google.protobuf.Service;
@@ -797,4 +799,40 @@
long amount, boolean writeToWAL) throws IOException {
throw new IOException("incrementColumnValue not supported");
}
+
+ @Override
+ public byte[][] getSplitKeys() throws IOException {
+ throw new IOException("getSplitKeys not supported");
+ }
+
+ @Override
+ public byte[][] getStartKeys() throws IOException {
+ throw new IOException("getStartKeys not supported");
+ }
+
+ @Override
+ public byte[][] getEndKeys() throws IOException {
+ throw new IOException("getEndKeys not supported");
+ }
+
+ @Override
+ public Pair<byte[][], byte[][]> getStartEndKeys() throws IOException {
+ throw new IOException("getStartEndKeys not supported");
+ }
+
+ @Override
+ public HRegionLocation getRegionLocation(String row) throws IOException {
+ throw new IOException("getRegionLocation not supported");
+ }
+
+ @Override
+ public HRegionLocation getRegionLocation(byte[] row) throws IOException {
+ throw new IOException("getRegionLocation not supported");
+ }
+
+ @Override
+ public HRegionLocation getRegionLocation(byte[] row, boolean reload) throws IOException {
+ throw new IOException("getRegionLocation not supported");
+ }
+
}
Index: hbase-shell/src/main/ruby/hbase/admin.rb
===================================================================
--- hbase-shell/src/main/ruby/hbase/admin.rb (revision 1552255)
+++ hbase-shell/src/main/ruby/hbase/admin.rb (working copy)
@@ -345,14 +345,14 @@
# Truncates table while maintaing region boundaries (deletes all records by recreating the table)
def truncate_preserve(table_name, conf = @conf)
h_table = org.apache.hadoop.hbase.client.HTable.new(conf, table_name)
- splits = h_table.getRegionLocations().keys().map{|i| Bytes.toString(i.getStartKey)}.delete_if{|k| k == ""}.to_java :String
- splits = org.apache.hadoop.hbase.util.Bytes.toByteArrays(splits)
+ raise ArgumentError, "Table #{table_name} is not enabled. Enable it first." unless enabled?(table_name)
+ splits = h_table.getSplitKeys()
table_description = h_table.getTableDescriptor()
yield 'Disabling table...' if block_given?
- disable(table_name)
+ @admin.disableTable(table_name)
yield 'Dropping table...' if block_given?
- drop(table_name)
+ @admin.deleteTable(table_name)
yield 'Creating table with region boundaries...' if block_given?
@admin.createTable(table_description, splits)