Index: src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMemcache.java =================================================================== --- src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMemcache.java (revision 575950) +++ src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMemcache.java (working copy) @@ -21,6 +21,7 @@ import java.io.IOException; import java.util.ArrayList; +import java.util.Collections; import java.util.Iterator; import java.util.List; import java.util.Map; @@ -43,10 +44,10 @@ // Note that since these structures are always accessed with a lock held, // no additional synchronization is required. - TreeMap memcache = new TreeMap(); - final ArrayList> history = - new ArrayList>(); - TreeMap snapshot = null; + volatile SortedMap memcache; + List> history = + Collections.synchronizedList(new ArrayList>()); + volatile SortedMap snapshot = null; final HLocking lock = new HLocking(); @@ -62,6 +63,8 @@ */ public HMemcache() { super(); + memcache = + Collections.synchronizedSortedMap(new TreeMap()); } /** represents the state of the memcache at a specified point in time */ @@ -66,10 +69,10 @@ /** represents the state of the memcache at a specified point in time */ static class Snapshot { - final TreeMap memcacheSnapshot; + final SortedMap memcacheSnapshot; final long sequenceId; - Snapshot(final TreeMap memcache, final Long i) { + Snapshot(final SortedMap memcache, final Long i) { super(); this.memcacheSnapshot = memcache; this.sequenceId = i.longValue(); @@ -102,8 +105,11 @@ Snapshot retval = new Snapshot(memcache, Long.valueOf(log.startCacheFlush())); this.snapshot = memcache; - history.add(memcache); - memcache = new TreeMap(); + synchronized (history) { + history.add(memcache); + } + memcache = + Collections.synchronizedSortedMap(new TreeMap()); // Reset size of this memcache. 
this.size.set(0); return retval; @@ -125,14 +131,8 @@ if(snapshot == null) { throw new IOException("Snapshot not present!"); } - for (Iterator> it = history.iterator(); - it.hasNext(); ) { - - TreeMap cur = it.next(); - if (snapshot == cur) { - it.remove(); - break; - } + synchronized (history) { + history.remove(snapshot); } this.snapshot = null; } finally { @@ -181,12 +181,14 @@ this.lock.obtainReadLock(); try { ArrayList results = get(memcache, key, numVersions); - for (int i = history.size() - 1; i >= 0; i--) { - if (numVersions > 0 && results.size() >= numVersions) { - break; + synchronized (history) { + for (int i = history.size() - 1; i >= 0; i--) { + if (numVersions > 0 && results.size() >= numVersions) { + break; + } + results.addAll(results.size(), + get(history.get(i), key, numVersions - results.size())); } - results.addAll(results.size(), - get(history.get(i), key, numVersions - results.size())); } return (results.size() == 0) ? null : ImmutableBytesWritable.toArray(results); @@ -209,9 +211,11 @@ this.lock.obtainReadLock(); try { internalGetFull(memcache, key, results); - for (int i = history.size() - 1; i >= 0; i--) { - TreeMap cur = history.get(i); - internalGetFull(cur, key, results); + synchronized (history) { + for (int i = history.size() - 1; i >= 0; i--) { + SortedMap cur = history.get(i); + internalGetFull(cur, key, results); + } } return results; @@ -220,7 +224,7 @@ } } - void internalGetFull(TreeMap map, HStoreKey key, + void internalGetFull(SortedMap map, HStoreKey key, TreeMap results) { SortedMap tailMap = map.tailMap(key); for (Map.Entry es: tailMap.entrySet()) { @@ -251,7 +255,7 @@ * @return Ordered list of items found in passed map. If no * matching values, returns an empty list (does not return null). 
*/ - ArrayList get(final TreeMap map, + ArrayList get(final SortedMap map, final HStoreKey key, final int numVersions) { ArrayList result = new ArrayList(); // TODO: If get is of a particular version -- numVersions == 1 -- we @@ -288,10 +292,12 @@ this.lock.obtainReadLock(); try { List results = getKeys(this.memcache, origin, versions); - for (int i = history.size() - 1; i >= 0; i--) { - results.addAll(results.size(), getKeys(history.get(i), origin, - versions == HConstants.ALL_VERSIONS ? versions : - (versions - results.size()))); + synchronized (history) { + for (int i = history.size() - 1; i >= 0; i--) { + results.addAll(results.size(), getKeys(history.get(i), origin, + versions == HConstants.ALL_VERSIONS ? versions : + (versions - results.size()))); + } } return results; } finally { @@ -307,7 +313,7 @@ * equal or older timestamp. If no keys, returns an empty List. Does not * return null. */ - private List getKeys(final TreeMap map, + private List getKeys(final SortedMap map, final HStoreKey origin, final int versions) { List result = new ArrayList(); SortedMap tailMap = map.tailMap(origin); @@ -359,7 +365,7 @@ ////////////////////////////////////////////////////////////////////////////// class HMemcacheScanner extends HAbstractScanner { - final TreeMap backingMaps[]; + SortedMap backingMaps[]; final Iterator keyIterators[]; @SuppressWarnings("unchecked") @@ -369,14 +375,16 @@ super(timestamp, targetCols); lock.obtainReadLock(); try { - this.backingMaps = new TreeMap[history.size() + 1]; + synchronized (history) { + this.backingMaps = new SortedMap[history.size() + 1]; - // Note that since we iterate through the backing maps from 0 to n, we - // need to put the memcache first, the newest history second, ..., etc. + // Note that we iterate through the backing maps from 0 to n; memcache + // goes first, then history (filled oldest-first below -- NOTE(review): the loop + // puts history.get(0) in slot 1 and the newest snapshot last; confirm + // that newest-second precedence was not intended here). 
- backingMaps[0] = memcache; - for (int i = history.size() - 1; i > 0; i--) { - backingMaps[i + 1] = history.get(i); + backingMaps[0] = memcache; + for (int i = history.size() - 1; i >= 0; i--) { + backingMaps[i + 1] = history.get(i); + } } this.keyIterators = new Iterator[backingMaps.length]; @@ -387,9 +395,13 @@ HStoreKey firstKey = new HStoreKey(firstRow); for (int i = 0; i < backingMaps.length; i++) { - keyIterators[i] = firstRow.getLength() != 0 ? - backingMaps[i].tailMap(firstKey).keySet().iterator() : - backingMaps[i].keySet().iterator(); + if (firstRow != null && firstRow.getLength() != 0) { + keyIterators[i] = + backingMaps[i].tailMap(firstKey).keySet().iterator(); + + } else { + keyIterators[i] = backingMaps[i].keySet().iterator(); + } while (getNext(i)) { if (!findFirstRow(i, firstRow)) { Index: src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegion.java =================================================================== --- src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegion.java (revision 575950) +++ src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegion.java (working copy) @@ -1599,6 +1599,7 @@ return multipleMatchers; } + /** {@inheritDoc} */ public boolean next(HStoreKey key, SortedMap results) throws IOException { // Filtered flag is set by filters. 
If a cell has been 'filtered out' Index: src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStore.java =================================================================== --- src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStore.java (revision 575950) +++ src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStore.java (working copy) @@ -31,6 +31,7 @@ import java.util.List; import java.util.Map; import java.util.Random; +import java.util.SortedMap; import java.util.TreeMap; import java.util.Vector; import java.util.Map.Entry; @@ -439,7 +440,7 @@ * @param logCacheFlushId flush sequence number * @throws IOException */ - void flushCache(final TreeMap inputCache, + void flushCache(final SortedMap inputCache, final long logCacheFlushId) throws IOException { flushCacheHelper(inputCache, logCacheFlushId, true); @@ -445,7 +446,7 @@ flushCacheHelper(inputCache, logCacheFlushId, true); } - void flushCacheHelper(TreeMap inputCache, + void flushCacheHelper(SortedMap inputCache, long logCacheFlushId, boolean addToAvailableMaps) throws IOException { synchronized(flushLock) { @@ -1123,7 +1124,7 @@ * @param key * @param numVersions Number of versions to fetch. Must be > 0. 
* @param memcache Checked for deletions - * @return + * @return values for the specified versions * @throws IOException */ byte [][] get(HStoreKey key, int numVersions, final HMemcache memcache) @@ -1171,10 +1172,11 @@ break; } } - while ((readval = new ImmutableBytesWritable()) != null && + for (readval = new ImmutableBytesWritable(); map.next(readkey, readval) && readkey.matchesRowCol(key) && - !hasEnoughVersions(numVersions, results)) { + !hasEnoughVersions(numVersions, results); + readval = new ImmutableBytesWritable()) { if (!isDeleted(readkey, readval.get(), memcache, deletes)) { results.add(readval.get()); } @@ -1212,10 +1214,11 @@ * @throws IOException */ List getKeys(final HStoreKey origin, List allKeys, - final int versions) - throws IOException { - if (allKeys == null) { - allKeys = new ArrayList(); + final int versions) throws IOException { + + List keys = allKeys; + if (keys == null) { + keys = new ArrayList(); } // This code below is very close to the body of the get method. 
this.lock.obtainReadLock(); @@ -1238,16 +1241,17 @@ continue; } if (!isDeleted(readkey, readval.get(), null, null) && - !allKeys.contains(readkey)) { - allKeys.add(new HStoreKey(readkey)); + !keys.contains(readkey)) { + keys.add(new HStoreKey(readkey)); } - while ((readval = new ImmutableBytesWritable()) != null && + for (readval = new ImmutableBytesWritable(); map.next(readkey, readval) && - readkey.matchesRowCol(origin)) { + readkey.matchesRowCol(origin); + readval = new ImmutableBytesWritable()) { if (!isDeleted(readkey, readval.get(), null, null) && - !allKeys.contains(readkey)) { - allKeys.add(new HStoreKey(readkey)); - if (versions != ALL_VERSIONS && allKeys.size() >= versions) { + !keys.contains(readkey)) { + keys.add(new HStoreKey(readkey)); + if (versions != ALL_VERSIONS && keys.size() >= versions) { break; } } @@ -1254,7 +1258,7 @@ } } } - return allKeys; + return keys; } finally { this.lock.releaseReadLock(); } Index: src/contrib/hbase/src/test/org/apache/hadoop/hbase/MiniHBaseCluster.java =================================================================== --- src/contrib/hbase/src/test/org/apache/hadoop/hbase/MiniHBaseCluster.java (revision 575950) +++ src/contrib/hbase/src/test/org/apache/hadoop/hbase/MiniHBaseCluster.java (working copy) @@ -50,7 +50,8 @@ private FileSystem fs; private Path parentdir; private MasterThread masterThread = null; - ArrayList regionThreads; + ArrayList regionThreads = + new ArrayList(); private boolean deleteOnExit = true; /** @@ -125,7 +126,7 @@ this.parentdir = new Path(conf.get(HBASE_DIR, DEFAULT_HBASE_DIR)); fs.mkdirs(parentdir); this.masterThread = startMaster(this.conf); - this.regionThreads = startRegionServers(this.conf, nRegionNodes); + this.regionThreads.addAll(startRegionServers(this.conf, nRegionNodes)); } catch(IOException e) { shutdown(); throw e; @@ -357,17 +358,15 @@ if(masterThread != null) { masterThread.getMaster().shutdown(); } - if (regionServerThreads != null) { - synchronized(regionServerThreads) { - 
if (regionServerThreads != null) { - for(Thread t: regionServerThreads) { - if (t.isAlive()) { - try { - t.join(); - } catch (InterruptedException e) { - // continue - } - } + // regionServerThreads can never be null because they are initialized when + // the class is constructed. + synchronized(regionServerThreads) { + for(Thread t: regionServerThreads) { + if (t.isAlive()) { + try { + t.join(); + } catch (InterruptedException e) { + // continue } } } @@ -381,8 +380,7 @@ } LOG.info("Shutdown " + ((masterThread != null)? masterThread.getName(): "0 masters") + " " + - ((regionServerThreads == null)? 0: regionServerThreads.size()) + - " region server(s)"); + regionServerThreads.size() + " region server(s)"); } void shutdown() { Index: src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHMemcache.java =================================================================== --- src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHMemcache.java (revision 575950) +++ src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHMemcache.java (working copy) @@ -22,6 +22,7 @@ import java.io.IOException; import java.io.UnsupportedEncodingException; import java.util.Map; +import java.util.SortedMap; import java.util.TreeMap; import junit.framework.TestCase; @@ -97,7 +98,7 @@ // Save off old state. int oldHistorySize = hmc.history.size(); - TreeMap oldMemcache = hmc.memcache; + SortedMap oldMemcache = hmc.memcache; // Run snapshot. Snapshot s = hmc.snapshotMemcacheForLog(log); // Make some assertions about what just happened.