Index: hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java (revision 1403958)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java (working copy)
@@ -81,7 +81,6 @@
import org.apache.hadoop.hbase.HConstants.OperationStatusCode;
import org.apache.hadoop.hbase.HDFSBlocksDistribution;
import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HServerInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.NotServingRegionException;
@@ -116,7 +115,7 @@
import org.apache.hadoop.hbase.ipc.RpcCallContext;
import org.apache.hadoop.hbase.monitoring.MonitoredTask;
import org.apache.hadoop.hbase.monitoring.TaskMonitor;
-import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
+import org.apache.hadoop.hbase.regionserver.MultiVersionConsistencyControl.WriteEntry;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
import org.apache.hadoop.hbase.regionserver.metrics.OperationMetrics;
import org.apache.hadoop.hbase.regionserver.metrics.RegionMetricsStorage;
@@ -4544,11 +4543,7 @@
// TODO: There's a lot of boiler plate code identical
// to increment... See how to better unify that.
/**
- *
* Perform one or more append operations on a row.
- *
- * Appends performed are done under row lock but reads do not take locks out
- * so this can be seen partially complete by gets and scans.
*
* @param append
* @param lockid
@@ -4558,7 +4553,6 @@
*/
public Result append(Append append, Integer lockid, boolean writeToWAL)
throws IOException {
- // TODO: Use MVCC to make this set of appends atomic to reads
byte[] row = append.getRow();
checkRow(row, "append");
boolean flush = false;
@@ -4572,9 +4566,16 @@
// Lock row
startRegionOperation();
this.writeRequestsCount.increment();
+ WriteEntry w = null;
try {
Integer lid = getLock(lockid, row, true);
this.updatesLock.readLock().lock();
+ // wait for all prior MVCC transactions to finish - while we hold the row lock
+ // (so that we are guaranteed to see the latest state)
+ WriteEntry tmp = mvcc.beginMemstoreInsert();
+ mvcc.completeMemstoreInsert(tmp);
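+ // (completeMemstoreInsert blocks until the global read point has advanced
+ // past tmp's write number, i.e. past every earlier in-flight transaction)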
+ // now start my own transaction
+ w = mvcc.beginMemstoreInsert();
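+ // KVs tagged with w's write number below stay invisible to scanners
+ // until completeMemstoreInsert(w) advances the read point past it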
try {
long now = EnvironmentEdgeManager.currentTimeMillis();
// Process each family
@@ -4637,6 +4638,7 @@
newKV.getBuffer(), newKV.getQualifierOffset(),
kv.getQualifierLength());
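+ // tag the new KV with this transaction's write number so concurrent
+ // readers skip it until the MVCC transaction completes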
+ newKV.setMemstoreTS(w.getWriteNumber());
kvs.add(newKV);
// Append update to WAL
@@ -4666,7 +4668,7 @@
//Actually write to Memstore now
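+ // pass the smallest read point across the region's active scanners;
+ // upsert may only prune versions that no scanner can still see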
for (Map.Entry<Store, List<KeyValue>> entry : tempMemstore.entrySet()) {
Store store = entry.getKey();
- size += store.upsert(entry.getValue());
+ size += store.upsert(entry.getValue(), getSmallestReadPoint());
allKVs.addAll(entry.getValue());
}
size = this.addAndGetGlobalMemstoreSize(size);
@@ -4679,6 +4681,9 @@
syncOrDefer(txid); // sync the transaction log outside the rowlock
}
} finally {
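+ // always complete the MVCC transaction, even on error, so the read
+ // point can advance and later transactions are not blocked forever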
+ if (w != null) {
+ mvcc.completeMemstoreInsert(w);
+ }
closeRegionOperation();
}
@@ -4696,11 +4701,7 @@
}
/**
- *
* Perform one or more increment operations on a row.
- *
- * Increments performed are done under row lock but reads do not take locks
- * out so this can be seen partially complete by gets and scans.
* @param increment
* @param lockid
* @param writeToWAL
@@ -4710,7 +4711,6 @@
public Result increment(Increment increment, Integer lockid,
boolean writeToWAL)
throws IOException {
- // TODO: Use MVCC to make this set of increments atomic to reads
byte [] row = increment.getRow();
checkRow(row, "increment");
TimeRange tr = increment.getTimeRange();
@@ -4725,9 +4725,16 @@
// Lock row
startRegionOperation();
this.writeRequestsCount.increment();
+ WriteEntry w = null;
try {
Integer lid = getLock(lockid, row, true);
this.updatesLock.readLock().lock();
+ // wait for all prior MVCC transactions to finish - while we hold the row lock
+ // (so that we are guaranteed to see the latest state)
+ WriteEntry tmp = mvcc.beginMemstoreInsert();
+ mvcc.completeMemstoreInsert(tmp);
+ // now start my own transaction
+ w = mvcc.beginMemstoreInsert();
try {
long now = EnvironmentEdgeManager.currentTimeMillis();
// Process each family
@@ -4766,6 +4773,7 @@
// Append new incremented KeyValue to list
KeyValue newKV = new KeyValue(row, family.getKey(), column.getKey(),
now, Bytes.toBytes(amount));
+ newKV.setMemstoreTS(w.getWriteNumber());
kvs.add(newKV);
// Prepare WAL updates
@@ -4794,7 +4802,7 @@
//Actually write to Memstore now
for (Map.Entry<Store, List<KeyValue>> entry : tempMemstore.entrySet()) {
Store store = entry.getKey();
- size += store.upsert(entry.getValue());
+ size += store.upsert(entry.getValue(), getSmallestReadPoint());
allKVs.addAll(entry.getValue());
}
size = this.addAndGetGlobalMemstoreSize(size);
@@ -4807,6 +4815,9 @@
syncOrDefer(txid); // sync the transaction log outside the rowlock
}
} finally {
+ if (w != null) {
+ mvcc.completeMemstoreInsert(w);
+ }
closeRegionOperation();
long after = EnvironmentEdgeManager.currentTimeMillis();
this.opMetrics.updateIncrementMetrics(increment.getFamilyMap().keySet(), after - before);
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java (revision 1403958)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java (working copy)
@@ -2033,7 +2033,19 @@
return this.region.regionInfo;
}
- @Override
+ /**
+ * Used in tests. TODO: Remove
+ *
+ * Updates the value for the given row/family/qualifier. This function will always be seen as
+ * atomic by other readers because it only puts a single KV to memstore. Thus no read/write
+ * control necessary.
+ * @param row row to update
+ * @param f family to update
+ * @param qualifier qualifier to update
+ * @param newValue the new value to set into memstore
+ * @return memstore size delta
+ * @throws IOException
+ */
public long updateColumnValue(byte [] row, byte [] f,
byte [] qualifier, long newValue)
throws IOException {
@@ -2054,11 +2066,10 @@
}
@Override
- public long upsert(Iterable<KeyValue> kvs) throws IOException {
+ public long upsert(Iterable<KeyValue> kvs, long readpoint) throws IOException {
this.lock.readLock().lock();
try {
- // TODO: Make this operation atomic w/ MVCC
- return this.memstore.upsert(kvs);
+ return this.memstore.upsert(kvs, readpoint);
} finally {
this.lock.readLock().unlock();
}
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStore.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStore.java (revision 1403958)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStore.java (working copy)
@@ -435,6 +435,8 @@
}
/**
+ * Only used by tests. TODO: Remove
+ *
* Given the specs of a column, update it, first by inserting a new record,
* then removing the old one. Since there is only 1 KeyValue involved, the memstoreTS
* will be set to 0, thus ensuring that they instantly appear to anyone. The underlying
@@ -449,7 +451,7 @@
* @param now
* @return Timestamp
*/
- public long updateColumnValue(byte[] row,
+ long updateColumnValue(byte[] row,
byte[] family,
byte[] qualifier,
long newValue,
@@ -497,7 +499,7 @@
// create or update (upsert) a new KeyValue with
// 'now' and a 0 memstoreTS == immediately visible
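+ // (a readpoint of 1 preserves the old behaviour: only KVs with
+ // memstoreTS == 0 are candidates for removal)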
return upsert(Arrays.asList(
- new KeyValue(row, family, qualifier, now, Bytes.toBytes(newValue)))
+ new KeyValue(row, family, qualifier, now, Bytes.toBytes(newValue))), 1L
);
} finally {
this.lock.readLock().unlock();
@@ -519,15 +521,15 @@
* atomically. Scans will only see each KeyValue update as atomic.
*
* @param kvs
+ * @param readpoint readpoint below which we can safely remove duplicate KVs
* @return change in memstore size
*/
- public long upsert(Iterable<KeyValue> kvs) {
+ public long upsert(Iterable<KeyValue> kvs, long readpoint) {
this.lock.readLock().lock();
try {
long size = 0;
for (KeyValue kv : kvs) {
- kv.setMemstoreTS(0);
- size += upsert(kv);
+ size += upsert(kv, readpoint);
}
return size;
} finally {
@@ -549,7 +551,7 @@
* @param kv
* @return change in size of MemStore
*/
- private long upsert(KeyValue kv) {
+ private long upsert(KeyValue kv, long readpoint) {
// Add the KeyValue to the MemStore
// Use the internalAdd method here since we (a) already have a lock
// and (b) cannot safely use the MSLAB here without potentially
@@ -566,6 +568,7 @@
kv.getBuffer(), kv.getQualifierOffset(), kv.getQualifierLength());
SortedSet<KeyValue> ss = kvset.tailSet(firstKv);
Iterator<KeyValue> it = ss.iterator();
+ int versionsOlderThanReadpoint = 0;
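+ // the first version below the readpoint is kept for the oldest
+ // concurrent scanner; any further ones are invisible to every scanner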
while ( it.hasNext() ) {
KeyValue cur = it.next();
@@ -573,23 +576,23 @@
// ignore the one just put in
continue;
}
- // if this isn't the row we are interested in, then bail
- if (!kv.matchingRow(cur)) {
- break;
- }
-
- // if the qualifier matches and it's a put, remove it
- if (kv.matchingQualifier(cur)) {
-
- // to be extra safe we only remove Puts that have a memstoreTS==0
- if (kv.getType() == KeyValue.Type.Put.getCode() &&
- kv.getMemstoreTS() == 0) {
- // false means there was a change, so give us the size.
- addedSize -= heapSizeChange(kv, true);
- it.remove();
- }
- } else {
- // past the column, done
- break;
- }
+ // check that this is the row and column we are interested in, otherwise bail
+ if (kv.matchingRow(cur) && kv.matchingQualifier(cur)) {
+ // only remove Puts that concurrent scanners cannot possibly see
+ if (cur.getType() == KeyValue.Type.Put.getCode() && cur.getMemstoreTS() < readpoint) {
+ if (versionsOlderThanReadpoint >= 1) {
+ // if we get here we have seen at least one version older than the readpoint,
+ // which means we can prove that no scanner will see this version
+ // false means there was a change, so give us the size.
+ addedSize -= heapSizeChange(cur, true);
+ it.remove();
+ } else {
+ versionsOlderThanReadpoint++;
+ }
+ }
+ } else {
+ // past the row or column, done
+ break;
+ }
}
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java (revision 1403958)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java (working copy)
@@ -75,20 +75,6 @@
throws IOException;
/**
- * Updates the value for the given row/family/qualifier. This function will always be seen as
- * atomic by other readers because it only puts a single KV to memstore. Thus no read/write
- * control necessary.
- * @param row row to update
- * @param f family to update
- * @param qualifier qualifier to update
- * @param newValue the new value to set into memstore
- * @return memstore size delta
- * @throws IOException
- */
- public long updateColumnValue(byte[] row, byte[] f, byte[] qualifier, long newValue)
- throws IOException;
-
- /**
* Adds or replaces the specified KeyValues.
*
* For each KeyValue specified, if a cell with the same row, family, and qualifier exists in
@@ -97,10 +83,11 @@
* This operation is atomic on each KeyValue (row/family/qualifier) but not necessarily atomic
* across all of them.
* @param kvs
+ * @param readpoint readpoint below which we can safely remove duplicate KVs
* @return memstore size delta
* @throws IOException
*/
- public long upsert(Iterable<KeyValue> kvs) throws IOException;
+ public long upsert(Iterable<KeyValue> kvs, long readpoint) throws IOException;
/**
* Adds a value to the memstore
Index: hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestAtomicOperation.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestAtomicOperation.java (revision 1403958)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestAtomicOperation.java (working copy)
@@ -108,7 +108,7 @@
public void testIncrementMultiThreads() throws IOException {
LOG.info("Starting test testIncrementMultiThreads");
- initHRegion(tableName, getName(), fam1);
+ initHRegion(tableName, getName(), fam1, fam2);
// create 100 threads, each will increment by its own quantity
int numThreads = 100;
@@ -135,6 +135,8 @@
}
}
assertICV(row, fam1, qual1, expectedTotal);
+ assertICV(row, fam1, qual2, expectedTotal*2);
+ assertICV(row, fam2, qual3, expectedTotal*3);
LOG.info("testIncrementMultiThreads successfully verified that total is " +
expectedTotal);
}
@@ -166,7 +168,9 @@
throws IOException{
HTableDescriptor htd = new HTableDescriptor(tableName);
for(byte [] family : families) {
- htd.addFamily(new HColumnDescriptor(family));
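+ // one version per column matches upsert's semantics: older versions
+ // only survive while a concurrent scanner may still need them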
+ HColumnDescriptor hcd = new HColumnDescriptor(family);
+ hcd.setMaxVersions(1);
+ htd.addFamily(hcd);
}
HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false);
Path path = new Path(DIR + callingMethod);
@@ -184,18 +188,14 @@
public static class Incrementer extends Thread {
private final HRegion region;
- private final int threadNumber;
private final int numIncrements;
private final int amount;
- private int count;
public Incrementer(HRegion region,
int threadNumber, int amount, int numIncrements) {
this.region = region;
- this.threadNumber = threadNumber;
this.numIncrements = numIncrements;
- this.count = 0;
this.amount = amount;
setDaemon(true);
}
@@ -206,16 +206,78 @@
try {
Increment inc = new Increment(row);
inc.addColumn(fam1, qual1, amount);
- Result result = region.increment(inc, null, true);
- // LOG.info("thread:" + threadNumber + " iter:" + i);
+ inc.addColumn(fam1, qual2, amount*2);
+ inc.addColumn(fam2, qual3, amount*3);
+ region.increment(inc, null, true);
+
+ // verify: Make sure we only see completed increments
+ Get g = new Get(row);
+ Result result = region.get(g, null);
+ assertEquals(Bytes.toLong(result.getValue(fam1, qual1))*2, Bytes.toLong(result.getValue(fam1, qual2)));
+ assertEquals(Bytes.toLong(result.getValue(fam1, qual1))*3, Bytes.toLong(result.getValue(fam2, qual3)));
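+ // a torn read would break these invariants: all three columns, across
+ // two families, are updated by a single Increment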
} catch (IOException e) {
e.printStackTrace();
}
- count++;
}
}
}
+ public void testAppendMultiThreads() throws IOException {
+ LOG.info("Starting test testAppendMultiThreads");
+ initHRegion(tableName, getName(), fam1, fam2);
+
+ int numThreads = 100;
+ int opsPerThread = 100;
+ AtomicOperation[] all = new AtomicOperation[numThreads];
+ final byte[] val = new byte[]{1};
+
+ AtomicInteger failures = new AtomicInteger(0);
+ // create all threads
+ for (int i = 0; i < numThreads; i++) {
+ all[i] = new AtomicOperation(region, opsPerThread, null, failures) {
+ @Override
+ public void run() {
+ for (int i=0; i<numOps; i++) {