Index: src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestGet2.java
===================================================================
--- src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestGet2.java	(revision 0)
+++ src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestGet2.java	(revision 0)
@@ -0,0 +1,168 @@
+package org.apache.hadoop.hbase;
+
+import java.io.IOException;
+import java.util.Map;
+import java.util.TreeMap;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.dfs.MiniDFSCluster;
+import org.apache.hadoop.hbase.filter.StopRowFilter;
+import org.apache.hadoop.hbase.filter.WhileMatchRowFilter;
+import org.apache.hadoop.io.Text;
+
+
+/**
+ * {@link TestGet} is a medley of tests of get all done up as a single test.
+ * This class breaks individual get cases out into separate test methods.
+ */
+public class TestGet2 extends HBaseTestCase {
+  private final Log LOG = LogFactory.getLog(this.getClass().getName());
+  private MiniDFSCluster miniHdfs;
+
+  protected void setUp() throws Exception {
+    super.setUp();
+    this.miniHdfs = new MiniDFSCluster(this.conf, 1, true, null);
+  }
+
+  /**
+   * Tests for HADOOP-2161.
+   * @throws Exception
+   */
+  public void testGetFull() throws Exception {
+    HRegion region = null;
+    HScannerInterface scanner = null;
+    HLog hlog = new HLog(this.miniHdfs.getFileSystem(), this.testDir,
+      this.conf, null);
+    try {
+      HTableDescriptor htd = createTableDescriptor(getName());
+      HRegionInfo hri = new HRegionInfo(htd, null, null);
+      region = new HRegion(this.testDir, hlog, this.miniHdfs.getFileSystem(),
+        this.conf, hri, null, null);
+      for (int i = 0; i < COLUMNS.length; i++) {
+        addContent(region, COLUMNS[i].toString());
+      }
+      // Find two rows to use doing getFull.
+      final Text arbitraryStartRow = new Text("b");
+      Text actualStartRow = null;
+      final Text arbitraryStopRow = new Text("c");
+      Text actualStopRow = null;
+      Text [] columns = new Text [] {new Text(COLFAMILY_NAME1)};
+      scanner = region.getScanner(columns,
+        arbitraryStartRow, HConstants.LATEST_TIMESTAMP,
+        new WhileMatchRowFilter(new StopRowFilter(arbitraryStopRow)));
+      HStoreKey key = new HStoreKey();
+      TreeMap<Text, byte []> value = new TreeMap<Text, byte []>();
+      while (scanner.next(key, value)) {
+        if (actualStartRow == null) {
+          actualStartRow = new Text(key.getRow());
+        } else {
+          actualStopRow = key.getRow();
+        }
+      }
+      // Assert I got all out.
+      assertColumnsPresent(region, actualStartRow);
+      assertColumnsPresent(region, actualStopRow);
+      // Force a flush so store files come into play.
+      region.flushcache();
+      // Assert I got all out.
+      assertColumnsPresent(region, actualStartRow);
+      assertColumnsPresent(region, actualStopRow);
+    } finally {
+      if (scanner != null) {
+        scanner.close();
+      }
+      if (region != null) {
+        try {
+          region.close();
+        } catch (Exception e) {
+          e.printStackTrace();
+        }
+      }
+      hlog.closeAndDelete();
+    }
+  }
+
+  public void testGetAtTimestamp() throws IOException {
+    HRegion region = null;
+    HRegionIncommon region_incommon = null;
+    HLog hlog = new HLog(this.miniHdfs.getFileSystem(), this.testDir,
+      this.conf, null);
+
+    try {
+      HTableDescriptor htd = createTableDescriptor(getName());
+      HRegionInfo hri = new HRegionInfo(htd, null, null);
+      region = new HRegion(this.testDir, hlog, this.miniHdfs.getFileSystem(),
+        this.conf, hri, null, null);
+      region_incommon = new HRegionIncommon(region);
+
+      long right_now = System.currentTimeMillis();
+      long one_second_ago = right_now - 1000;
+
+      Text t = new Text("test_row");
+      long lockid = region_incommon.startBatchUpdate(t);
+      region_incommon.put(lockid, COLUMNS[0], "old text".getBytes());
+      region_incommon.commit(lockid, one_second_ago);
+
+      lockid = region_incommon.startBatchUpdate(t);
+      region_incommon.put(lockid, COLUMNS[0], "new text".getBytes());
+      region_incommon.commit(lockid, right_now);
+
+      assertCellValueEquals(region, t, COLUMNS[0], right_now, "new text");
+      assertCellValueEquals(region, t, COLUMNS[0], one_second_ago, "old text");
+
+      // Force a flush so store files come into play.
+      region_incommon.flushcache();
+
+      assertCellValueEquals(region, t, COLUMNS[0], right_now, "new text");
+      assertCellValueEquals(region, t, COLUMNS[0], one_second_ago, "old text");
+
+    } finally {
+      if (region != null) {
+        try {
+          region.close();
+        } catch (Exception e) {
+          e.printStackTrace();
+        }
+      }
+      hlog.closeAndDelete();
+    }
+
+  }
+
+
+  private void assertCellValueEquals(final HRegion region, final Text row,
+    final Text column, final long timestamp, final String value)
+  throws IOException {
+    Map<Text, byte []> result = region.getFull(row, timestamp);
+    assertEquals("cell value at a given timestamp", value, new String(result.get(column)));
+  }
+
+  private void assertColumnsPresent(final HRegion r, final Text row)
+  throws IOException {
+    Map<Text, byte []> result = r.getFull(row);
+    int columnCount = 0;
+    for (Map.Entry<Text, byte []> e: result.entrySet()) {
+      columnCount++;
+      String column = e.getKey().toString();
+      boolean legitColumn = false;
+      for (int i = 0; i < COLUMNS.length; i++) {
+        // Assert value is same as row.  This is the nature of the data added.
+        assertTrue(row.equals(new Text(e.getValue())));
+        if (COLUMNS[i].equals(new Text(column))) {
+          legitColumn = true;
+          break;
+        }
+      }
+      assertTrue("is legit column name", legitColumn);
+    }
+    assertEquals("count of columns", COLUMNS.length, columnCount);
+  }
+
+  protected void tearDown() throws Exception {
+    if (this.miniHdfs != null) {
+      this.miniHdfs.shutdown();
+    }
+    super.tearDown();
+  }
+}
Index: src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStore.java
===================================================================
--- src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStore.java	(revision 598407)
+++ src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStore.java	(working copy)
@@ -187,8 +187,11 @@
         Text itCol = itKey.getColumn();
         if (results.get(itCol) == null && key.matchesWithoutColumn(itKey)) {
           byte [] val = tailMap.get(itKey);
-          results.put(itCol, val);
+          // Skip cells flagged as deleted; a delete marker is not data.
+          if (!HLogEdit.isDeleted(val)) {
+            results.put(itCol, val);
+          }
         } else if (key.getRow().compareTo(itKey.getRow()) < 0) {
           break;
         }
 
Index: src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegionServer.java
===================================================================
--- src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegionServer.java	(revision 598407)
+++ src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegionServer.java	(working copy)
@@ -1282,6 +1282,29 @@
   }
 
   /** {@inheritDoc} */
+  public MapWritable getRow(final Text regionName, final Text row, final long ts)
+    throws IOException {
+
+    checkOpen();
+    requestCount.incrementAndGet();
+    try {
+      HRegion region = getRegion(regionName);
+      MapWritable result = new MapWritable();
+      Map<Text, byte []> map = region.getFull(row, ts);
+      for (Map.Entry<Text, byte []> es: map.entrySet()) {
+        result.put(new HStoreKey(row, es.getKey()),
+          new ImmutableBytesWritable(es.getValue()));
+      }
+      return result;
+
+    } catch (IOException e) {
+      checkFileSystem();
+      throw e;
+    }
+  }
+
+
+  /** {@inheritDoc} */
   public MapWritable next(final long scannerId) throws IOException {
 
     checkOpen();
Index: src/contrib/hbase/src/java/org/apache/hadoop/hbase/HTable.java
===================================================================
--- src/contrib/hbase/src/java/org/apache/hadoop/hbase/HTable.java	(revision 598407)
+++ src/contrib/hbase/src/java/org/apache/hadoop/hbase/HTable.java	(working copy)
@@ -324,13 +324,25 @@
   }
 
   /**
-   * Get all the data for the specified row
+   * Get all the data for the specified row at the latest timestamp
    * 
    * @param row row key
    * @return map of colums to values
    * @throws IOException
    */
   public SortedMap<Text, byte []> getRow(Text row) throws IOException {
+    return getRow(row, HConstants.LATEST_TIMESTAMP);
+  }
+
+  /**
+   * Get all the data for the specified row at a specified timestamp
+   * 
+   * @param row row key
+   * @param ts timestamp
+   * @return map of columns to values
+   * @throws IOException
+   */
+  public SortedMap<Text, byte []> getRow(Text row, long ts) throws IOException {
     checkClosed();
     MapWritable value = null;
     for (int tries = 0; tries < numRetries; tries++) {
@@ -339,7 +351,7 @@
         connection.getHRegionConnection(r.getServerAddress());
 
       try {
-        value = server.getRow(r.getRegionInfo().getRegionName(), row);
+        value = server.getRow(r.getRegionInfo().getRegionName(), row, ts);
         break;
 
       } catch (IOException e) {
@@ -373,6 +385,7 @@
     return results;
   }
+
 
   /**
    * Get a scanner on the current table starting at the specified row.
    * Return the specified columns.
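
For reviewers: below is a minimal sketch of how client code might exercise the
new timestamped getRow() overload added to HTable above. The helper class and
method names are made up for illustration; only HTable.getRow(Text, long) and
HConstants.LATEST_TIMESTAMP come from the patch itself.

import java.io.IOException;
import java.util.Map;
import java.util.SortedMap;

import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTable;
import org.apache.hadoop.io.Text;

public class TimestampedGetSketch {
  /**
   * Hypothetical helper: print every column of a row as of the given
   * timestamp. Cells committed after ts are not returned; passing
   * HConstants.LATEST_TIMESTAMP behaves like the one-argument getRow().
   */
  static void dumpRowAsOf(final HTable table, final Text row, final long ts)
  throws IOException {
    SortedMap<Text, byte []> cells = table.getRow(row, ts);
    for (Map.Entry<Text, byte []> e: cells.entrySet()) {
      System.out.println(e.getKey() + " => " + new String(e.getValue()));
    }
  }

  /** Same row read two ways: as of now, and as of one second ago. */
  static void compare(final HTable table, final Text row) throws IOException {
    dumpRowAsOf(table, row, HConstants.LATEST_TIMESTAMP);
    dumpRowAsOf(table, row, System.currentTimeMillis() - 1000);
  }
}
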
Index: src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegionInterface.java
===================================================================
--- src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegionInterface.java	(revision 598407)
+++ src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegionInterface.java	(working copy)
@@ -100,6 +100,19 @@
     throws IOException;
 
   /**
+   * Get all the data for the specified row at a given timestamp
+   *
+   * @param regionName region name
+   * @param row row key
+   * @param ts timestamp
+   * @return map of values
+   * @throws IOException
+   */
+  public MapWritable getRow(final Text regionName, final Text row, final long ts)
+    throws IOException;
+
+
+  /**
    * Applies a batch of updates via one RPC
    * 
    * @param regionName name of the region to update
Index: src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegion.java
===================================================================
--- src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegion.java	(revision 598407)
+++ src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegion.java	(working copy)
@@ -1017,7 +1017,26 @@
    * @throws IOException
    */
   public Map<Text, byte []> getFull(Text row) throws IOException {
-    HStoreKey key = new HStoreKey(row, System.currentTimeMillis());
+    return getFull(row, HConstants.LATEST_TIMESTAMP);
+  }
+
+  /**
+   * Fetch all the columns for the indicated row at a specified timestamp.
+   * Returns a TreeMap that maps column names to values.
+   *
+   * We should eventually use Bloom filters here, to reduce running time.  If
+   * the database has many column families and is very sparse, then we could be
+   * checking many files needlessly.  A small Bloom for each row would help us
+   * determine which column groups are useful for that row.  That would let us
+   * avoid a bunch of disk activity.
+   *
+   * @param row row key
+   * @param ts timestamp
+   * @return Map<Text, byte []> of values
+   * @throws IOException
+   */
+  public Map<Text, byte []> getFull(Text row, long ts) throws IOException {
+    HStoreKey key = new HStoreKey(row, ts);
     obtainRowLock(row);
     try {
       TreeMap<Text, byte []> result = new TreeMap<Text, byte []>();
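
A behavioral note on the HStore.java hunk: without the HLogEdit.isDeleted()
guard, a getFull() against the memcache could hand back a delete marker as if
it were cell data. The sketch below restates that guard in isolation;
isDeleted() is a stand-in for the real HLogEdit.isDeleted(), and the map
plumbing is simplified. In the real code the map is keyed by HStoreKey
(row/column/timestamp), so skipping a marker can let an older version of the
same column through; this sketch collapses that detail to one entry per column.

import java.util.Map;
import java.util.SortedMap;
import java.util.TreeMap;

import org.apache.hadoop.io.Text;

public class DeleteMarkerSketch {
  // Stand-in for HLogEdit.isDeleted(): HBase records a deletion as a cell
  // whose value is a special marker, so raw reads must filter such cells.
  static boolean isDeleted(final byte [] val) {
    return val == null; // the real check compares val to the marker bytes
  }

  // Mirrors the patched loop: keep the first (newest) value seen for each
  // column, but only when that value is not a delete marker.
  static Map<Text, byte []> collectColumns(
      final SortedMap<Text, byte []> tailMap) {
    Map<Text, byte []> results = new TreeMap<Text, byte []>();
    for (Map.Entry<Text, byte []> e: tailMap.entrySet()) {
      byte [] val = e.getValue();
      if (results.get(e.getKey()) == null && !isDeleted(val)) {
        results.put(e.getKey(), val);
      }
    }
    return results;
  }
}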