Index: src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
===================================================================
--- src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java	(revision 1358900)
+++ src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java	(working copy)
@@ -53,6 +53,7 @@
 import org.apache.hadoop.hbase.MiniHBaseCluster;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Increment;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
@@ -3513,6 +3514,67 @@
     this.region = null;
   }
 
+  /**
+   * Test that increments survive a memstore flush and honor an intervening
+   * Delete, through both the Increment API and incrementColumnValue.
+   * @throws Exception
+   */
+  @Test
+  public void testIncrementWithFlushAndDelete() throws Exception {
+    Configuration conf = HBaseConfiguration.create();
+    String method = "testIncrementWithFlushAndDelete";
+    byte[] tableName = Bytes.toBytes(method);
+
+    byte[] rowKey = Bytes.toBytes("test-rowKey");
+    byte[] family = Bytes.toBytes("family");
+    byte[] newIncAPI = Bytes.toBytes("newAPI");
+    byte[] oldIncAPI = Bytes.toBytes("oldAPI");
+
+    this.region = initHRegion(tableName, method, conf, family);
+    // Bump both counters ten times, once through each increment API.
+    for (int j = 0; j < 10; j++) {
+      this.region.incrementColumnValue(rowKey, family, oldIncAPI, 1L, false);
+      Increment inc = new Increment(rowKey);
+      inc.addColumn(family, newIncAPI, 1L);
+      this.region.increment(inc, null, true);
+    }
+    Result r = this.region.get(new Get(rowKey), null);
+    assertEquals(10L, Bytes.toLong(r.getValue(family, newIncAPI)));
+    assertEquals(10L, Bytes.toLong(r.getValue(family, oldIncAPI)));
+
+    // Both counters must still read 10 after the memstore is flushed.
+    this.region.flushcache();
+    r = this.region.get(new Get(rowKey), null);
+    assertEquals(10L, Bytes.toLong(r.getValue(family, newIncAPI)));
+    assertEquals(10L, Bytes.toLong(r.getValue(family, oldIncAPI)));
+
+    // Delete the row; both counters must disappear.
+    Delete delete = new Delete(rowKey);
+    this.region.delete(delete, null, true);
+    r = this.region.get(new Get(rowKey), null);
+    assertNull(r.getValue(family, newIncAPI));
+    assertNull(r.getValue(family, oldIncAPI));
+    // Let the clock tick so the new increments sort after the delete marker.
+    Thread.sleep(1);
+
+    // Incrementing again must restart from zero instead of resurrecting the
+    // flushed values; this pass also exercises writeToWAL == false.
+    for (int j = 0; j < 10; j++) {
+      this.region.incrementColumnValue(rowKey, family, oldIncAPI, 1L, false);
+      Increment inc = new Increment(rowKey);
+      inc.addColumn(family, newIncAPI, 1L);
+      this.region.increment(inc, null, false);
+    }
+    r = this.region.get(new Get(rowKey), null);
+    assertEquals(10L, Bytes.toLong(r.getValue(family, newIncAPI)));
+    assertEquals(10L, Bytes.toLong(r.getValue(family, oldIncAPI)));
+  }
+
   private void putData(int startRow, int numRows, byte [] qf,
       byte [] ...families)
   throws IOException {
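The test above compresses a client-visible bug into a single-region harness: counters are bumped through both increment APIs, the memstore is flushed, the row is deleted, and the next round of increments must restart from zero rather than resurrect the flushed values. As a companion, here is a minimal client-side sketch of the same sequence, assuming a running 0.94-era cluster and a pre-created "counters" table with a "family" column family (the table name, qualifiers, and the crude sleep are illustrative assumptions, not part of the patch):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.*;
    import org.apache.hadoop.hbase.util.Bytes;

    public class IncrementFlushDeleteSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        HBaseAdmin admin = new HBaseAdmin(conf);
        HTable table = new HTable(conf, "counters"); // assumed table
        byte[] row = Bytes.toBytes("test-rowKey");
        byte[] fam = Bytes.toBytes("family");
        byte[] qual = Bytes.toBytes("newAPI");

        Increment inc = new Increment(row);
        inc.addColumn(fam, qual, 1L);
        table.increment(inc);                 // counter is now 1

        admin.flush("counters");              // asynchronous; crude wait below
        Thread.sleep(3000);
        table.delete(new Delete(row));        // row, and counter, are gone

        table.increment(inc);                 // must yield 1, not 2
        Result r = table.get(new Get(row));
        System.out.println(Bytes.toLong(r.getValue(fam, qual)));
        table.close();
      }
    }

The failure mode this patch guards against is the final increment reading the flushed value behind the delete marker and printing 2; with the fix it prints 1.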
Index: src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java	(revision 1358900)
+++ src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java	(working copy)
@@ -3687,89 +3687,6 @@
     return new Result(results);
   }
 
-  /**
-   * An optimized version of {@link #get(Get)} that checks MemStore first for
-   * the specified query.
-   * <p>
-   * This is intended for use by increment operations where we have the
-   * guarantee that versions are never inserted out-of-order so if a value
-   * exists in MemStore it is the latest value.
-   * <p>
-   * It only makes sense to use this method without a TimeRange and maxVersions
-   * equal to 1.
-   * @param get
-   * @return result
-   * @throws IOException
-   */
-  private List<KeyValue> getLastIncrement(final Get get) throws IOException {
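The removed helper is the other half of the fix: its javadoc spells out the assumption that for increments the newest version always sits in the MemStore, so the read can short-circuit past the store files. A flush breaks that assumption (the current value moves into a store file) and a Delete breaks it harder (only a tombstone remains in the MemStore), which is exactly the flush-then-delete sequence the new test drives. Below is a toy model of that hazard in plain Java with no HBase types; the maps, the TOMBSTONE sentinel, and the read logic are illustrative stand-ins, not the real method's internals:

    import java.util.HashMap;
    import java.util.Map;

    public class MemstoreFirstHazard {
      // Stand-in for a delete marker; real HBase uses typed KeyValues.
      static final Long TOMBSTONE = Long.MIN_VALUE;
      static Map<String, Long> memstore = new HashMap<String, Long>();
      static Map<String, Long> storeFiles = new HashMap<String, Long>();

      // The flawed pattern: trust the memstore, then fall back to store
      // files without asking whether a newer delete marker exists.
      static long readMemstoreFirst(String key) {
        Long v = memstore.get(key);
        if (v != null && !v.equals(TOMBSTONE)) {
          return v;                            // short-circuit on memstore hit
        }
        Long flushed = storeFiles.get(key);    // skips past the tombstone!
        return flushed == null ? 0L : flushed;
      }

      public static void main(String[] args) {
        memstore.put("ctr", 10L);
        storeFiles.put("ctr", memstore.remove("ctr")); // flush to a store file
        memstore.put("ctr", TOMBSTONE);                // delete the counter
        // A merged read that honors the tombstone returns 0; the
        // memstore-first shortcut resurrects the flushed 10 instead.
        System.out.println(readMemstoreFirst("ctr"));  // prints 10
      }
    }

Deleting the shortcut sends increments through the regular get() path, which merges MemStore and store files and applies delete markers, at the cost of the optimization the removed comment describes.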