Index: src/java/org/apache/hadoop/hbase/io/RowsBatchUpdate.java
===================================================================
--- src/java/org/apache/hadoop/hbase/io/RowsBatchUpdate.java	(revision 0)
+++ src/java/org/apache/hadoop/hbase/io/RowsBatchUpdate.java	(revision 0)
@@ -0,0 +1,91 @@
+/**
+ * Copyright 2007 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.io;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Iterator;
+
+import org.apache.hadoop.io.Writable;
+
+/**
+ * A writable object that contains a series of BatchUpdates.
+ *
+ * At this point, this class serves only as a wrapper and will not
+ * provide a performance gain comparable to doing an SQL INSERT with
+ * many values until a later version.
+ */
+public class RowsBatchUpdate implements Writable, Iterable<BatchUpdate> {
+
+  /**
+   * BatchUpdates container
+   */
+  private ArrayList<BatchUpdate> updates = new ArrayList<BatchUpdate>();
+
+  /**
+   * Default constructor
+   */
+  public RowsBatchUpdate() {
+    super();
+  }
+
+  /**
+   * Returns all inserted BatchUpdates
+   * @return the updates
+   */
+  public ArrayList<BatchUpdate> getUpdates() {
+    return updates;
+  }
+
+  /**
+   * Adds a new BatchUpdate to the list
+   * @param update the update to add
+   */
+  public void add(BatchUpdate update) {
+    updates.add(update);
+  }
+
+  /**
+   * @return Iterator over the contained BatchUpdates
+   */
+  public Iterator<BatchUpdate> iterator() {
+    return updates.iterator();
+  }
+
+  public void readFields(DataInput input) throws IOException {
+    // Clear any prior contents: Writable instances may be reused
+    this.updates.clear();
+    int nUpdate = input.readInt();
+    for (int i = 0; i < nUpdate; i++) {
+      BatchUpdate op = new BatchUpdate();
+      op.readFields(input);
+      this.updates.add(op);
+    }
+  }
+
+  public void write(DataOutput output) throws IOException {
+    output.writeInt(updates.size());
+    for (BatchUpdate op : updates) {
+      op.write(output);
+    }
+  }
+}
Index: src/java/org/apache/hadoop/hbase/client/HTable.java
===================================================================
--- src/java/org/apache/hadoop/hbase/client/HTable.java	(revision 677307)
+++ src/java/org/apache/hadoop/hbase/client/HTable.java	(working copy)
@@ -41,6 +41,7 @@
 import org.apache.hadoop.hbase.io.BatchUpdate;
 import org.apache.hadoop.hbase.io.Cell;
 import org.apache.hadoop.hbase.io.RowResult;
+import org.apache.hadoop.hbase.io.RowsBatchUpdate;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.io.Text;
 
@@ -1168,6 +1169,18 @@
       }
     );
   }
+
+  /**
+   * Commit a RowsBatchUpdate to the table.
+   * @param rowsBatchUpdate the batch of row updates to commit
+   * @throws IOException
+   */
+  public synchronized void commit(final RowsBatchUpdate rowsBatchUpdate)
+  throws IOException {
+    for (BatchUpdate batchUpdate : rowsBatchUpdate) {
+      commit(batchUpdate);
+    }
+  }
 
   /**
    * Implements the scanner interface for the HBase client.
Index: src/test/org/apache/hadoop/hbase/client/TestBatchUpdate.java
===================================================================
--- src/test/org/apache/hadoop/hbase/client/TestBatchUpdate.java	(revision 677307)
+++ src/test/org/apache/hadoop/hbase/client/TestBatchUpdate.java	(working copy)
@@ -30,6 +30,7 @@
 import org.apache.hadoop.hbase.io.BatchUpdate;
 import org.apache.hadoop.hbase.io.Cell;
 import org.apache.hadoop.hbase.io.RowResult;
+import org.apache.hadoop.hbase.io.RowsBatchUpdate;
 import org.apache.hadoop.hbase.util.Bytes;
 
 /**
@@ -41,6 +42,7 @@
   private static final String SMALLFAM_STR = "smallfam:";
   private static final byte [] SMALLFAM = Bytes.toBytes(SMALLFAM_STR);
   private static final int SMALL_LENGTH = 1;
+  private static final int NB_BATCH_ROWS = 10;
 
   private byte[] value;
   private byte[] smallValue;
@@ -125,4 +127,27 @@
       fail("Value is long enough, should not throw exception");
     }
   }
+
+  public void testRowsBatchUpdate() {
+    RowsBatchUpdate rowsUpdate = new RowsBatchUpdate();
+    for (int i = 0; i < NB_BATCH_ROWS; i++) {
+      BatchUpdate batchUpdate = new BatchUpdate("row" + i);
+      batchUpdate.put(CONTENTS, value);
+      rowsUpdate.add(batchUpdate);
+    }
+    try {
+      table.commit(rowsUpdate);
+
+      byte [][] columns = { CONTENTS };
+      Scanner scanner = table.getScanner(columns, HConstants.EMPTY_START_ROW);
+      int nbRows = 0;
+      for (RowResult row : scanner) {
+        nbRows++;
+      }
+      assertEquals(NB_BATCH_ROWS, nbRows);
+      scanner.close();
+    } catch (IOException e) {
+      fail("This is unexpected: " + e);
+    }
+  }
 }
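
Note for reviewers: a minimal usage sketch of the client API this patch adds. This is illustrative only and not part of the patch; it assumes an open HTable handle named "table" and an existing "contents:" column family, and the "contents:data" column, row keys, and values are made up for the example.

    RowsBatchUpdate rowsUpdate = new RowsBatchUpdate();
    for (int i = 0; i < 100; i++) {
      // One BatchUpdate per row; each carries that row's column writes.
      BatchUpdate batchUpdate = new BatchUpdate("row" + i);
      batchUpdate.put(Bytes.toBytes("contents:data"), Bytes.toBytes("value" + i));
      rowsUpdate.add(batchUpdate);
    }
    // One commit call for the whole batch. Per the class javadoc, this
    // currently just commits each contained BatchUpdate in turn; it does
    // not yet save round trips the way a multi-value SQL INSERT would.
    table.commit(rowsUpdate);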