Index: src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestDeleteAll.java
===================================================================
--- src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestDeleteAll.java (revision 0)
+++ src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestDeleteAll.java (revision 0)
@@ -0,0 +1,122 @@
+/**
+ * Copyright 2007 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase;
+
+import java.io.IOException;
+import java.util.Map;
+import java.util.TreeMap;
+
+import org.apache.hadoop.dfs.MiniDFSCluster;
+import org.apache.hadoop.hbase.filter.StopRowFilter;
+import org.apache.hadoop.hbase.filter.WhileMatchRowFilter;
+import org.apache.hadoop.io.Text;
+import org.apache.commons.logging.*;
+
+/**
+ * Test the functionality of deleteAll.
+ */
+public class TestDeleteAll extends HBaseTestCase {
+  static final Log LOG = LogFactory.getLog(TestDeleteAll.class);
+  private MiniDFSCluster miniHdfs;
+
+  @Override
+  protected void setUp() throws Exception {
+    super.setUp();
+    this.miniHdfs = new MiniDFSCluster(this.conf, 1, true, null);
+  }
+
+  /**
+   * Tests for HADOOP-1550.
+   * @throws Exception
+   */
+  public void testDeleteAll() throws Exception {
+    HRegion region = null;
+    HRegionIncommon region_incommon = null;
+    HLog hlog = new HLog(this.miniHdfs.getFileSystem(), this.testDir,
+      this.conf, null);
+
+    try{
+      HTableDescriptor htd = createTableDescriptor(getName());
+      HRegionInfo hri = new HRegionInfo(htd, null, null);
+      region = new HRegion(this.testDir, hlog, this.miniHdfs.getFileSystem(),
+        this.conf, hri, null, null);
+      region_incommon = new HRegionIncommon(region);
+
+      // insert a few versions worth of data for a row
+      Text row = new Text("test_row");
+      long t0 = System.currentTimeMillis();
+      long t1 = t0 - 15000;
+      long t2 = t1 - 15000;
+
+      long lock = region_incommon.startUpdate(row);
+      region_incommon.put(lock, COLUMNS[0], "t0 data".getBytes());
+      region_incommon.commit(lock, t0);
+
+      lock = region_incommon.startUpdate(row);
+      region_incommon.put(lock, COLUMNS[0], "t1 data".getBytes());
+      region_incommon.commit(lock, t1);
+
+      lock = region_incommon.startUpdate(row);
+      region_incommon.put(lock, COLUMNS[0], "t2 data".getBytes());
+      region_incommon.commit(lock, t2);
+
+      // call delete all at a timestamp, make sure only the most recent stuff is left behind
+      region.deleteAll(row, t1);
+      assertCellValueEquals(region, row, COLUMNS[0], t0, "t0 data");
+      assertCellValueEquals(region, row, COLUMNS[0], t1, null);
+      assertCellValueEquals(region, row, COLUMNS[0], t2, null);
+
+      // call delete all w/o a timestamp, make sure nothing is left.
+      region.deleteAll(row, HConstants.LATEST_TIMESTAMP);
+      assertCellValueEquals(region, row, COLUMNS[0], t0, null);
+      assertCellValueEquals(region, row, COLUMNS[0], t1, null);
+      assertCellValueEquals(region, row, COLUMNS[0], t2, null);
+
+    } finally {
+      if (region != null) {
+        try {
+          region.close();
+        } catch (Exception e) {
+          e.printStackTrace();
+        }
+      }
+      hlog.closeAndDelete();
+    }
+  }
+
+  private void assertCellValueEquals(final HRegion region, final Text row,
+    final Text column, final long timestamp, final String value)
+  throws IOException {
+    Map<Text, byte[]> result = region.getFull(row, timestamp);
+    if(value == null){
+      assertEquals("cell value at a given timestamp", null, result.get(column));
+    } else {
+      assertEquals("cell value at a given timestamp", value, new String(result.get(column)));
+    }
+
+  }
+
+  protected void tearDown() throws Exception {
+    if (this.miniHdfs != null) {
+      this.miniHdfs.shutdown();
+    }
+    super.tearDown();
+  }
+}
Index: src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegionServer.java
===================================================================
--- src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegionServer.java (revision 601629)
+++ src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegionServer.java (working copy)
@@ -1447,6 +1447,14 @@
     region.deleteAll(row, column, timestamp);
   }
 
+  /** {@inheritDoc} */
+  public void deleteAll(final Text regionName, final Text row,
+      final long timestamp)
+  throws IOException {
+    HRegion region = getRegion(regionName);
+    region.deleteAll(row, timestamp);
+  }
+
   /**
    * @return Info on this server.
*/ Index: src/contrib/hbase/src/java/org/apache/hadoop/hbase/HTable.java =================================================================== --- src/contrib/hbase/src/java/org/apache/hadoop/hbase/HTable.java (revision 601629) +++ src/contrib/hbase/src/java/org/apache/hadoop/hbase/HTable.java (working copy) @@ -742,6 +742,48 @@ } } + /* + * Completely delete the row's cells of the same timestamp or older. + */ + public void deleteAll(final Text row, long ts) + throws IOException { + checkClosed(); + for(int tries = 0; tries < numRetries; tries++) { + HRegionLocation r = getRegionLocation(row); + HRegionInterface server = + connection.getHRegionConnection(r.getServerAddress()); + try { + server.deleteAll(r.getRegionInfo().getRegionName(), row, ts); + break; + + } catch (IOException e) { + if (e instanceof RemoteException) { + e = RemoteExceptionHandler.decodeRemoteException((RemoteException) e); + } + if (tries == numRetries - 1) { + throw e; + } + if (LOG.isDebugEnabled()) { + LOG.debug("reloading table servers because: " + e.getMessage()); + } + tableServers = connection.reloadTableServers(tableName); + } + try { + Thread.sleep(this.pause); + } catch (InterruptedException x) { + // continue + } + } + } + + /* + * Completely delete the row's cells. + */ + public void deleteAll(final Text row) + throws IOException { + deleteAll(row, HConstants.LATEST_TIMESTAMP); + } + /** * Abort a row mutation. 
* Index: src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegionInterface.java =================================================================== --- src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegionInterface.java (revision 601629) +++ src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegionInterface.java (working copy) @@ -134,6 +134,19 @@ */ public void deleteAll(Text regionName, Text row, Text column, long timestamp) throws IOException; + + /** + * Delete all cells that match the passed row and whose + * timestamp is equal-to or older than the passed timestamp. + * + * @param regionName region name + * @param row row key + * @param column column key + * @param timestamp Delete all entries that have this timestamp or older + * @throws IOException + */ + public void deleteAll(Text regionName, Text row, long timestamp) + throws IOException; // // remote scanner interface Index: src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegion.java =================================================================== --- src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegion.java (revision 601629) +++ src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegion.java (working copy) @@ -1058,7 +1058,7 @@ */ private List getKeys(final HStoreKey origin, final int versions) throws IOException { - + List keys = null; Text colFamily = HStoreKey.extractFamily(origin.getColumn()); HStore targetStore = stores.get(colFamily); @@ -1246,6 +1246,33 @@ releaseRowLock(row); } } + + /** + * Delete all cells of the same age as the passed timestamp or older. 
+   * @param row
+   *
+   * @param ts Delete all entries that have this timestamp or older
+   * @throws IOException
+   */
+  public void deleteAll(final Text row, final long ts)
+  throws IOException {
+
+    obtainRowLock(row);
+
+    try {
+      for(Map.Entry store : stores.entrySet()){
+        // delete all the cells in the store for this row
+        HStoreKey origin = new HStoreKey(row, new Text(store.getKey().toString() + ":"));
+        List<HStoreKey> keys = getKeys(origin, ALL_VERSIONS);
+        for(HStoreKey key : keys){
+          deleteMultiple(row, key.getColumn(), ts, ALL_VERSIONS);
+        }
+      }
+    } finally {
+      releaseRowLock(row);
+    }
+  }
+
   /**
    * Delete one or many cells.