Index: src/test/org/apache/hadoop/hbase/client/TestHTable.java =================================================================== --- src/test/org/apache/hadoop/hbase/client/TestHTable.java (revision 907740) +++ src/test/org/apache/hadoop/hbase/client/TestHTable.java (working copy) @@ -20,7 +20,12 @@ package org.apache.hadoop.hbase.client; import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.NavigableMap; +import org.apache.hadoop.hbase.ClusterStatus; import org.apache.hadoop.hbase.HBaseClusterTestCase; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; @@ -33,6 +38,7 @@ * Tests HTable */ public class TestHTable extends HBaseClusterTestCase implements HConstants { + private static final HColumnDescriptor column = new HColumnDescriptor(CATALOG_FAMILY); @@ -45,10 +51,7 @@ private static final byte [] attrName = Bytes.toBytes("TESTATTR"); private static final byte [] attrValue = Bytes.toBytes("somevalue"); - - - public void testGet() throws IOException { HTable table = null; try { @@ -130,8 +133,6 @@ } } - - /** * the test * @throws IOException @@ -256,7 +257,7 @@ fail(); } } - + /** * For HADOOP-2579 */ @@ -272,7 +273,7 @@ e.getClass()); } } - + public void testGetClosestRowBefore() throws IOException { HColumnDescriptor column2 = new HColumnDescriptor(Bytes.toBytes("info2:")); @@ -367,4 +368,293 @@ assertTrue(scan.getFamilyMap().get(CATALOG_FAMILY).size() == 0); } + ///////////////////////////////////////////////////////////////////////////// + // Multi + ///////////////////////////////////////////////////////////////////////////// + + private static final byte [] familyName2 = Bytes.toBytes("info2"); + private static final byte [] familyName3 = Bytes.toBytes("info3"); + private static final byte [] tableName = Bytes.toBytes("table"); + private static final String qualifierRoot = "qualifier"; + private static final int nr = 1000; + private HBaseAdmin 
admin; + private HTable table; + + void setupMulti() throws IOException { + HColumnDescriptor fam2 = new HColumnDescriptor(familyName2); + HColumnDescriptor fam3 = new HColumnDescriptor(familyName3); + admin = new HBaseAdmin(conf); + HTableDescriptor testTableDesc = new HTableDescriptor(tableName); + testTableDesc.addFamily(fam2); + testTableDesc.addFamily(fam3); + admin.createTable(testTableDesc); + table = new HTable(conf, tableName); + } + + public void testMultiGet() throws Exception { + setupMulti(); + + final List multiRes = new ArrayList(); + final List singleRes = new ArrayList(); + + System.out.println("Starting multiGet tester"); + + // Create Qualifiers + List qualifiers = createQualifiers(qualifierRoot, nr); + + ClusterStatus cs = admin.getClusterStatus(); + System.out.println("regions " + cs.getRegionsCount()); + + // Putting + int regions = 1; + int posForSplit = nr / (regions); + int pos = 0; + for (int region = 0; region < regions; region++) { + System.out.println("Splitting"); + admin.split(tableName); + Thread.sleep(4000); + + List puts = new ArrayList(); + for (; pos < (region + 1) * posForSplit; pos++) { + Put put = new Put(row); + put.add(familyName2, qualifiers.get(pos), Bytes.toBytes(1000 + pos)); + puts.add(put); + } + System.out.println("Putting"); + table.put(puts); + } + + cs = admin.getClusterStatus(); + System.out.println("regions " + cs.getRegionsCount()); + + // Create list of gets + final List gets = new ArrayList(); + for (byte[] qualifier : qualifiers) { + Get get = new Get(row); + get.addColumn(familyName2, qualifier); + gets.add(get); + } + + // Send the list to multiGet + time(new DurationMeasurable("multiget - columns") { + public void call() throws IOException { + multiRes.addAll(Arrays.asList(table.batch(gets))); + } + }); + + // Regular - Make multiple get calls with the list + time(new DurationMeasurable("multiple gets - columns") { + public void call() throws IOException { + for (Row get : gets) { + 
singleRes.add(table.get((Get) get)); + } + } + }); + + // Compare results + System.out.println("results len " + singleRes.size()); + assertEquals(singleRes.size(), multiRes.size()); + for (int i = 0; i < singleRes.size(); i++) { + KeyValue[] singleKvs = singleRes.get(i).raw(); + KeyValue[] multiKvs = multiRes.get(i).raw(); + for (int j = 0; j < singleKvs.length; j++) { + assertEquals(singleKvs[j], multiKvs[j]); + assertEquals(Bytes.compareTo(singleKvs[j].getValue(), multiKvs[j].getValue()), 0); + } + } + + } + + public void testMultiPut() throws Exception { + setupMulti(); + + final int startValue = 100; + final List qualifiers = createQualifiers(qualifierRoot, nr); + + System.out.println("Starting multiPut tester"); + + // MultiPut (multiple puts to insert column values in one row) + time(new DurationMeasurable("multiput - columns") { + public void call() throws IOException { + multiPutIncrementingValues(table, familyName2, qualifiers, startValue); + } + }); + + // Validate by getting all values for the relevant row / family + + Get get = new Get(row); + get.addFamily(familyName2); + Result result = table.get(get); + NavigableMap keyValues = result.getFamilyMap(familyName2); + + int idx = 0; + for (byte[] qual : qualifiers) { + byte [] val = keyValues.get(qual); + int expectedVal = idx + startValue; + assertEquals(expectedVal,Bytes.toInt(val)); + idx++; + } + assertEquals(qualifiers.size(),keyValues.size()); + + // MultiPut (multiple rows) + + final byte [] colname = Bytes.toBytes("aColumn"); + List multiPuts = new ArrayList(nr); + for(int i=0; i puts = new ArrayList(nr); + for(int i=0; i qualifiers = createQualifiers(qualifierRoot, nr); + + // Putting + multiPutIncrementingValues(table, familyName2, qualifiers, 1); + + // Multi Deleting + + time(new DurationMeasurable("multidelete - columns") { + public void call() throws IOException { + List multiDeletes = new ArrayList(nr); + for (int i = 0; i < nr; i++) { + Delete delete = new Delete(row); + 
delete.deleteColumn(familyName2, qualifiers.get(i)); + multiDeletes.add(delete); + } + table.batch(multiDeletes); + } + }); + + // Validate deleted + + Get get = new Get(row); + get.addFamily(familyName2); + Result result = table.get(get); +// NavigableMap keyValues = result.getFamilyMap(familyName2); + +// int idx = 0; +// for (byte[] qual : qualifiers) { +// byte [] val = keyValues.get(qual); +// int expectedVal = idx + startValue; +// assertEquals(expectedVal,Bytes.toInt(val)); +// idx++; +// } + // result should be empty + + assertEquals(0,result.size()); + + + // Putting again + multiPutIncrementingValues(table, familyName2, qualifiers, 1); + + // Deleting + + time(new DurationMeasurable("multiple deletes - columns") { + public void call() throws IOException { + List deletes = new ArrayList(nr); + for (int i = 0; i < nr; i++) { + Delete delete = new Delete(row); + delete.deleteColumn(familyName2, qualifiers.get(i)); + deletes.add(delete); + table.delete(delete); + } + } + }); + + } + + ///////////////////////////////////////////////////////////////////////////// + // Helpers + ///////////////////////////////////////////////////////////////////////////// + + private long time(DurationMeasurable measurable) throws Exception { + long start = System.currentTimeMillis(); + measurable.call(); + long stop = System.currentTimeMillis(); + long dur = stop - start; + System.out.println("Timer " + measurable.getName() + " " + dur + " milliseconds"); + return dur; + } + + private List createQualifiers(String qualifierRoot, int nr) { + List qualifiers = new ArrayList(); + for (int i = 0; i < nr; i++) { + qualifiers.add(Bytes.toBytes(qualifierRoot + i)); + } + return qualifiers; + } + + // put multiple values as columns in a single row + private void multiPutIncrementingValues(HTable table, byte[] familyName, List qualifiers, int startValue) + throws IOException { + int nr = qualifiers.size(); + List multiPuts = new ArrayList(nr); + for (int i = 0; i < nr; i++) { + Put put = 
new Put(row); + put.add(familyName, qualifiers.get(i), Bytes.toBytes(startValue + i)); + multiPuts.add(put); + } + table.batch(multiPuts); + } + } + +abstract class DurationMeasurable { + private String name; + abstract public void call() throws Exception; + public DurationMeasurable(String name) { + this.name = name; + } + public void setName(String name) { + this.name = name; + } + public String getName() { + return name; + } +} + Index: src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java =================================================================== --- src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java (revision 907740) +++ src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java (working copy) @@ -78,8 +78,11 @@ import org.apache.hadoop.hbase.Leases.LeaseStillHeldException; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Get; +import org.apache.hadoop.hbase.client.Multi; +import org.apache.hadoop.hbase.client.MultiResult; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.client.Row; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.ServerConnection; import org.apache.hadoop.hbase.client.ServerConnectionManager; @@ -2499,4 +2502,35 @@ doMain(args, regionServerClass); } + public MultiResult multi(Multi multi) throws IOException { + List regionNames = multi.getRegionNames(); + List actions = multi.getActions(); + int size = actions.size(); + + Result[] results = new Result[size]; + + for (int i = 0; i < size; i++) { + Row action = actions.get(i); + Result result = null; + try { + if (action instanceof Delete) { + delete(regionNames.get(i), (Delete) action); + result = new Result(); + } else if (action instanceof Get) { + result = get(regionNames.get(i), (Get) action); + } else { // action instanceof Put + put(regionNames.get(i), (Put) action); + result = new Result(); + } + results[i] = 
result; + } catch (Exception e) { + // Have to be able to catch different types of exceptions here so we can + // know if a region related exception is thrown + } + } + + HServerAddress address = new HServerAddress(this.server.getListenerAddress()); + return new MultiResult(address, results); + } + } Index: src/java/org/apache/hadoop/hbase/io/HbaseObjectWritable.java =================================================================== --- src/java/org/apache/hadoop/hbase/io/HbaseObjectWritable.java (revision 907740) +++ src/java/org/apache/hadoop/hbase/io/HbaseObjectWritable.java (working copy) @@ -41,10 +41,12 @@ import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Get; +import org.apache.hadoop.hbase.client.Multi; +import org.apache.hadoop.hbase.client.MultiResult; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.client.Row; import org.apache.hadoop.hbase.client.Scan; -import org.apache.hadoop.hbase.filter.PrefixFilter; import org.apache.hadoop.hbase.filter.*; import org.apache.hadoop.hbase.io.HbaseMapWritable; import org.apache.hadoop.io.MapWritable; @@ -154,6 +156,13 @@ addToMap(FirstKeyOnlyFilter.class, code++); addToMap(Delete [].class, code++); + + // + // Multi + // + addToMap(Row.class, code++); + addToMap(Multi.class, code++); + addToMap(MultiResult.class, code++); } private Class declaredClass; Index: src/java/org/apache/hadoop/hbase/ipc/HRegionInterface.java =================================================================== --- src/java/org/apache/hadoop/hbase/ipc/HRegionInterface.java (revision 907740) +++ src/java/org/apache/hadoop/hbase/ipc/HRegionInterface.java (working copy) @@ -26,6 +26,8 @@ import org.apache.hadoop.hbase.NotServingRegionException; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Get; +import org.apache.hadoop.hbase.client.Multi; +import 
org.apache.hadoop.hbase.client.MultiResult; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Scan; @@ -245,4 +247,13 @@ * @throws IOException */ public HServerInfo getHServerInfo() throws IOException; + + /** + * Method used for doing multiple actions(Deletes, Gets and Puts) in one call + * @param multi + * @return MultiResult + * @throws IOException + */ + public MultiResult multi(Multi multi) throws IOException; + } Index: src/java/org/apache/hadoop/hbase/client/HTable.java =================================================================== --- src/java/org/apache/hadoop/hbase/client/HTable.java (revision 907740) +++ src/java/org/apache/hadoop/hbase/client/HTable.java (working copy) @@ -26,6 +26,8 @@ import java.util.List; import java.util.Map; import java.util.TreeMap; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -83,6 +85,10 @@ this(new HBaseConfiguration(), Bytes.toBytes(tableName)); } + private ExecutorService pool; + private int timeout; + private int nrHRS; + /** * Creates an object to access a HBase table * @@ -130,6 +136,12 @@ this.autoFlush = true; this.currentWriteBufferSize = 0; this.scannerCaching = conf.getInt("hbase.client.scanner.caching", 1); + + this.nrHRS = getCurrentNrHRS(); + int nrThreads = conf.getInt("hbase.htable.threads.max", nrHRS); + this.pool = Executors.newFixedThreadPool(nrThreads); + this.timeout = conf.getInt("hbase.htable.timeout", 30); + this.maxScannerResultSize = conf.getLong( HConstants.HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE_KEY, HConstants.DEFAULT_HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE); @@ -424,7 +436,49 @@ } /** + * Set the timeout for multi requests. * + * @param timeout + */ + public void setTimeout(int timeout) { + this.timeout = timeout; + } + + /** + * Get the timeout for multi requests. 
+ * + * @return the current timeout + */ + public int getTimeout() { + return timeout; + } + + /** + * Method that does a batch call on Deletes, Gets and Puts. + * + * @param actions + * @return the results from the actions. A null in the return array + * means that the call for that action failed, even after retries + * @throws IOException + */ + public synchronized Result[] batch(final List actions) throws IOException { + return connection.processBatch(actions, tableName, pool, nrHRS, timeout); + } + + /** + * TODO Might want to change this to public, would be nice if the number of + * threads would atomatically change when servers were added and removed + * + * @return the number of region servers that are currently running + * @throws IOException + */ + private int getCurrentNrHRS() throws IOException { + HBaseAdmin admin = new HBaseAdmin(configuration); + return admin.getClusterStatus().getServers(); + } + + /** + * * @param delete * @throws IOException * @since 0.20.0 @@ -623,6 +677,7 @@ */ public void close() throws IOException{ flushCommits(); + pool.shutdownNow(); } /** Index: src/java/org/apache/hadoop/hbase/client/HConnectionManager.java =================================================================== --- src/java/org/apache/hadoop/hbase/client/HConnectionManager.java (revision 907740) +++ src/java/org/apache/hadoop/hbase/client/HConnectionManager.java (working copy) @@ -30,6 +30,9 @@ import java.util.TreeSet; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -51,6 +54,7 @@ import org.apache.hadoop.hbase.ipc.HRegionInterface; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.MetaUtils; +import org.apache.hadoop.hbase.util.Pair; import 
org.apache.hadoop.hbase.util.SoftValueSortedMap; import org.apache.hadoop.hbase.util.Writables; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWrapper; @@ -1286,5 +1290,316 @@ } } } - } + + /** + * Base process calls for batching of Deletes, Gets and Puts + * @param actions + * @param tableName + * @param pool + * @param nrOfServers + * @param timeout + * @return result of the actions + * @throws IOException + */ + public Result [] processBatch(List actions, final byte [] tableName, + ExecutorService pool, int nrOfServers, int timeout) + throws IOException { + int size = actions.size(); + + // Map from the region server to the actions that are going to be sent to it + Map, List>> rsMap = + new HashMap, List>>(nrOfServers); + + // Map used for putting action results back into the same order and to deal + // with calls that failed + Map> indexMap = + new HashMap>(); + + // Populates the two maps + buildServerAndIndexMaps(actions, tableName, rsMap, indexMap); + + // The result to return + Result [] results = new Result [size]; + + // Futures for results returned + List> futures = new ArrayList>(); + + // Adding tasks to thread pool + for(final Map.Entry,List>> entry : + rsMap.entrySet()) { + + Multi multi = new Multi(entry.getValue()); + futures.add(pool.submit(createCallable(entry.getKey(), multi, tableName))); + } + + // Retry list + List> retries = new ArrayList>(); + + // Waiting and checking results from the individual calls + for(Future f : futures){ + try { + processResult(f.get(timeout, TimeUnit.SECONDS), rsMap, indexMap, + results, retries); + } catch (Exception e){ + + throw new IOException(e); + } + } + + // Doing the retries + // Not expecting this to happend to often, can be made more effective later + doRetries(retries, results, tableName); + + return results; + } + + + /** + * Creating the map from server to a list of actions and an index map so the + * correct return position can be found + * @param actions + * @param rsMap map from server address 
to regionName and action + * @param indexMap map from server address to original position + * @throws IOException + */ + private void buildServerAndIndexMaps(List actions, byte [] tableName, + Map, List>> rsMap, + Map> indexMap) + throws IOException { + + int size = actions.size(); + boolean lastRow = false; + + // List to original position, to help when returning results in the incomming + // order + List index = new ArrayList(size); + + // Holder for the temporary sorted list of actions + List sortedActions = new ArrayList(size); + + // TODO check if it actually makes things faster by sorting the actions first + // Sort the action list and put the mapping from unsorted to sorted in index + sortAndBuildIndex(actions, sortedActions, index); + // If actions was already sorted + if(sortedActions.size() == 0) { + sortedActions = actions; + } + + byte [] nextRow = sortedActions.get(0).getRow(); + HRegionLocation location = locateRegion(tableName, nextRow, true); + byte [] endRow = location.getRegionInfo().getEndKey(); + HServerAddress address = location.getServerAddress(); + byte [] regionName = location.getRegionInfo().getRegionName(); + + HServerAddress nextAddress = null; + + List regionNames = new ArrayList(); + List currentActions = new ArrayList(); + List indexes = new ArrayList(); + + // Looping through all the actions and put them in the region server and + // index maps + for(int i=0; i,List>( + regionNames, currentActions)); + indexMap.put(address, indexes); + address = nextAddress; + + Pair,List> pair = rsMap.get(address); + if (address == null) { + // Creating new list for the next server + regionNames = new ArrayList(); + currentActions = new ArrayList(); + indexes = new ArrayList(); + } else { + // Getting old lists + regionNames = pair.getFirst(); + currentActions = pair.getSecond(); + indexes = indexMap.get(address); + } + } + } + } + + /** + * Method that sorts one list and returns it in another. 
It also builds an + * index so that the original position can be found + * @param actions + * @param sorted + * @param index + */ + private void sortAndBuildIndex(List actions, List sorted, + List index) { + // Check if already sorted + if(!sorted(actions)) { + for(Row action : actions) { + sorted.add(action); + } + Collections.sort(sorted); + + // Build index, naive + for(Row action : actions) { + //TODO need to check if we need to be able to handle insertions to the + //same row, since it kind of defeats the purpose of batching. + index.add(Collections.binarySearch(sorted, action)); + } + } else { + for(int i=0; i list) { + for(int i=0; i 0){ + return false; + } + } + return true; + } + + /** + * Helper method to create the callable method + * @param address + * @param multi + * @return + */ + private MultiCallable createCallable( + final HServerAddress address, final Multi multi, + final byte [] tableName) { + return new MultiCallable(this) { + public MultiResult call() throws IOException { + return connection.getRegionServerWithRetries( + new ServerCallable(connection, tableName, null) { + public MultiResult call() throws IOException { + return server.multi(multi); + } + + @Override + public void instantiateServer(boolean reload) throws IOException { + //TODO check this + server = connection.getHRegionConnection(address); + } + + //TODO + //Implement rest of the methods + } + ); + } + }; + } + + /** + * Helper method for processing of results + * @param multi + * @param results + * @param failedRegions + * @throws IOException + */ + private void processResult(MultiResult multiRes, + Map, List>> rsMap, + Map> positions, + Result [] results, List> retries) + throws IOException { + + Result [] returns = multiRes.getResults(); + HServerAddress address = multiRes.getServerAddress(); + List poss = positions.get(address); + + //fails + List actions = null; + + for(int i=0; i(actions.get(i), pos)); + } else { + results [pos] = res; + } + } + } + + /** + * Handles the 
retries for Gets that were sent to the wrong region server + * @param fails + * @param results + * @throws IOException + * @throws RuntimeException + */ + private void doRetries(List> fails, Result [] results, + byte [] tableName) + throws IOException, RuntimeException { + Result res = null; + + for(final Pair fail : fails) { + for(int tries=0; tries(this, tableName, null) { + public Result call() throws IOException { + Row action = fail.getFirst(); + if(action instanceof Delete) { + server.delete(location.getRegionInfo().getRegionName(), + (Delete)fail.getFirst()); + } else if(action instanceof Get) { + return server.get(location.getRegionInfo().getRegionName(), + (Get)fail.getFirst()); + } else { // retry instanceof Put + server.put(location.getRegionInfo().getRegionName(), + (Put)fail.getFirst()); + } + return null; + } + } + ); + + if (res != null) { + break; + } + } + if (res == null) { + //Write error log message + } + results [fail.getSecond()] = res; + } + } + } + } Index: src/java/org/apache/hadoop/hbase/client/Row.java =================================================================== --- src/java/org/apache/hadoop/hbase/client/Row.java (revision 907740) +++ src/java/org/apache/hadoop/hbase/client/Row.java (working copy) @@ -19,12 +19,15 @@ */ package org.apache.hadoop.hbase.client; +import org.apache.hadoop.io.WritableComparable; + /** * Has a row. */ -interface Row { +public interface Row extends WritableComparable { /** * @return The row. 
*/ public byte [] getRow(); -} \ No newline at end of file + +} Index: src/java/org/apache/hadoop/hbase/client/HConnection.java =================================================================== --- src/java/org/apache/hadoop/hbase/client/HConnection.java (revision 907740) +++ src/java/org/apache/hadoop/hbase/client/HConnection.java (working copy) @@ -21,6 +21,8 @@ import java.io.IOException; import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.ExecutorService; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.HServerAddress; @@ -208,4 +210,19 @@ */ public int processBatchOfDeletes(ArrayList list, byte[] tableName) throws IOException; -} \ No newline at end of file + + /** + * Base process calls for batching of Deletes, Gets and Puts + * @param actions + * @param tableName + * @param pool + * @param nrOfServers + * @param timeout + * @return result of the actions + * @throws IOException + */ + public Result [] processBatch(List actions, final byte [] tableName, + ExecutorService pool, int nrOfServers, int timeout) + throws IOException; + +} Index: src/java/org/apache/hadoop/hbase/client/MultiCallable.java =================================================================== --- src/java/org/apache/hadoop/hbase/client/MultiCallable.java (revision 0) +++ src/java/org/apache/hadoop/hbase/client/MultiCallable.java (revision 0) @@ -0,0 +1,19 @@ +package org.apache.hadoop.hbase.client; + +import java.util.concurrent.Callable; + +/** + * Abstract class that implements Callable + * @param + */ +public abstract class MultiCallable implements Callable { + protected HConnection connection = null; + + /** + * Constructor + * @param connection + */ + public MultiCallable(HConnection connection) { + this.connection = connection; + } +} Index: src/java/org/apache/hadoop/hbase/client/Get.java =================================================================== --- src/java/org/apache/hadoop/hbase/client/Get.java (revision 
907740) +++ src/java/org/apache/hadoop/hbase/client/Get.java (working copy) @@ -61,7 +61,7 @@ *

* To add a filter, execute {@link #setFilter(Filter) setFilter}. */ -public class Get implements Writable { +public class Get implements Writable, Row, Comparable { private byte [] row = null; private long lockId = -1L; private int maxVersions = 1; @@ -349,6 +349,11 @@ return sb.toString(); } + //Row + public int compareTo(Row del) { + return Bytes.compareTo(this.getRow(), del.getRow()); + } + //Writable public void readFields(final DataInput in) throws IOException { Index: src/java/org/apache/hadoop/hbase/client/MultiResult.java =================================================================== --- src/java/org/apache/hadoop/hbase/client/MultiResult.java (revision 0) +++ src/java/org/apache/hadoop/hbase/client/MultiResult.java (revision 0) @@ -0,0 +1,78 @@ +package org.apache.hadoop.hbase.client; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +import org.apache.hadoop.hbase.HServerAddress; +import org.apache.hadoop.io.Writable; + +/** + * Wrapper class used with the multiGet call + */ +public class MultiResult implements Writable { + + private HServerAddress address = null; + private Result [] results = null; + + ///////////////////////////////////////////////////////////////////////////// + // Constructors + ///////////////////////////////////////////////////////////////////////////// + /** Constructor for Writable. 
DO NOT USE */ + public MultiResult() {} + + /** + * Constructor + * @param address + * @param results + */ + public MultiResult(HServerAddress address, Result [] results) { + this.address = address; + this.results = results; + } + + ///////////////////////////////////////////////////////////////////////////// + // Getters + ///////////////////////////////////////////////////////////////////////////// + /** + * @return the address where this result came from + */ + public HServerAddress getServerAddress() { + return address; + } + + /** + * @return the results for this RegionServer + */ + public Result [] getResults() { + return results; + } + + ///////////////////////////////////////////////////////////////////////////// + // Writable + ///////////////////////////////////////////////////////////////////////////// + public void readFields(final DataInput in) + throws IOException { + this.address = new HServerAddress(); + this.address.readFields(in); + + int len = in.readInt(); + this.results = new Result [len]; + for(int i=0; i actions = null; + private List regionNames = null; + + ///////////////////////////////////////////////////////////////////////////// + // Constructors + ///////////////////////////////////////////////////////////////////////////// + /** Constructor for Writable. 
DO NOT USE */ + public Multi() {} + + /** + * Constructor + * @param pair + */ + public Multi(Pair, List> pair) { + this.regionNames = pair.getFirst(); + this.actions = pair.getSecond(); + } + + ///////////////////////////////////////////////////////////////////////////// + // Getters + ///////////////////////////////////////////////////////////////////////////// + /** + * @return the list of gets + */ + public List getActions() { + return this.actions; + } + + /** + * @return the list of region names + */ + public List getRegionNames() { + return this.regionNames; + } + + ///////////////////////////////////////////////////////////////////////////// + // Writable + ///////////////////////////////////////////////////////////////////////////// + public void readFields(final DataInput in) + throws IOException { + int size = in.readInt(); + this.regionNames = new ArrayList(size); + this.actions = new ArrayList(size); + + for (int i=0; i