Index: src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMaster.java
===================================================================
--- src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMaster.java	(revision 575373)
+++ src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMaster.java	(working copy)
@@ -1145,6 +1145,11 @@
    * by remote region servers have expired.
    */
   private void letRegionServersShutdown() {
+    if (!fsOk) {
+      // Forget waiting for the region servers if the file system has gone
+      // away. Just exit as quickly as possible.
+      return;
+    }
     synchronized (serversToServerInfo) {
       while (this.serversToServerInfo.size() > 0) {
         LOG.info("Waiting on following regionserver(s) to go down (or " +
Index: src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMemcache.java
===================================================================
--- src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMemcache.java	(revision 575373)
+++ src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMemcache.java	(working copy)
@@ -26,7 +26,6 @@
 import java.util.Map;
 import java.util.SortedMap;
 import java.util.TreeMap;
-import java.util.Vector;
 import java.util.concurrent.atomic.AtomicLong;
 
 import org.apache.commons.logging.Log;
@@ -40,10 +39,9 @@
  */
 public class HMemcache {
   static final Log LOG = LogFactory.getLog(HMemcache.class);
-  TreeMap<HStoreKey, byte []> memcache =
-    new TreeMap<HStoreKey, byte []>();
-  final Vector<TreeMap<HStoreKey, byte []>> history
-    = new Vector<TreeMap<HStoreKey, byte []>>();
+  TreeMap<HStoreKey, byte []> memcache = new TreeMap<HStoreKey, byte []>();
+  final ArrayList<TreeMap<HStoreKey, byte []>> history =
+    new ArrayList<TreeMap<HStoreKey, byte []>>();
   TreeMap<HStoreKey, byte []> snapshot = null;
 
   final HLocking lock = new HLocking();
@@ -100,7 +98,9 @@
     Snapshot retval =
       new Snapshot(memcache, Long.valueOf(log.startCacheFlush()));
     this.snapshot = memcache;
-    history.add(memcache);
+    synchronized (history) {
+      history.add(memcache);
+    }
     memcache = new TreeMap<HStoreKey, byte []>();
     // Reset size of this memcache.
     this.size.set(0);
@@ -123,12 +123,14 @@
     if(snapshot == null) {
       throw new IOException("Snapshot not present!");
     }
-    for (Iterator<TreeMap<HStoreKey, byte []>> it = history.iterator();
-      it.hasNext();) {
-      TreeMap<HStoreKey, byte []> cur = it.next();
-      if (snapshot == cur) {
-        it.remove();
-        break;
+    synchronized (history) {
+      for (Iterator<TreeMap<HStoreKey, byte []>> it = history.iterator();
+        it.hasNext();) {
+        TreeMap<HStoreKey, byte []> cur = it.next();
+        if (snapshot == cur) {
+          it.remove();
+          break;
+        }
       }
     }
     this.snapshot = null;
@@ -178,15 +180,18 @@
     this.lock.obtainReadLock();
     try {
       ArrayList<byte []> results = get(memcache, key, numVersions);
-      for (int i = history.size() - 1; i >= 0; i--) {
-        if (numVersions > 0 && results.size() >= numVersions) {
-          break;
+      synchronized (history) {
+        for (int i = history.size() - 1; i >= 0; i--) {
+          if (numVersions > 0 && results.size() >= numVersions) {
+            break;
+          }
+          results.addAll(results.size(),
+            get(history.get(i), key, numVersions - results.size()));
         }
-        results.addAll(results.size(),
-          get(history.elementAt(i), key, numVersions - results.size()));
       }
-      return (results.size() == 0)?
-        null: ImmutableBytesWritable.toArray(results);
+      return (results.size() == 0) ? null :
+        ImmutableBytesWritable.toArray(results);
+
     } finally {
       this.lock.releaseReadLock();
     }
@@ -205,9 +210,11 @@
     this.lock.obtainReadLock();
     try {
       internalGetFull(memcache, key, results);
-      for (int i = history.size()-1; i >= 0; i--) {
-        TreeMap<HStoreKey, byte []> cur = history.elementAt(i);
-        internalGetFull(cur, key, results);
+      synchronized (history) {
+        for (int i = history.size()-1; i >= 0; i--) {
+          TreeMap<HStoreKey, byte []> cur = history.get(i);
+          internalGetFull(cur, key, results);
+        }
       }
       return results;
@@ -284,10 +291,12 @@
     this.lock.obtainReadLock();
     try {
       List<HStoreKey> results = getKeys(this.memcache, origin, versions);
-      for (int i = history.size() - 1; i >= 0; i--) {
-        results.addAll(results.size(), getKeys(history.elementAt(i), origin,
-          versions == HConstants.ALL_VERSIONS? versions:
-          (results != null? versions - results.size(): versions)));
+      synchronized (history) {
+        for (int i = history.size() - 1; i >= 0; i--) {
+          results.addAll(results.size(), getKeys(history.get(i), origin,
+            versions == HConstants.ALL_VERSIONS ? versions :
+              (versions - results.size())));
+        }
       }
       return results;
     } finally {
@@ -366,32 +375,35 @@
       super(timestamp, targetCols);
       lock.obtainReadLock();
       try {
-        this.backingMaps = new TreeMap[history.size() + 1];
-
-        //NOTE: Since we iterate through the backing maps from 0 to n, we need
-        //      to put the memcache first, the newest history second, ..., etc.
-        backingMaps[0] = memcache;
-        for(int i = history.size() - 1; i > 0; i--) {
-          backingMaps[i] = history.elementAt(i);
-        }
-
-        this.keyIterators = new Iterator[backingMaps.length];
-        this.keys = new HStoreKey[backingMaps.length];
-        this.vals = new byte[backingMaps.length][];
+        synchronized (history) {
+          this.backingMaps = new TreeMap[history.size() + 1];
+
+          //NOTE: Since we iterate through the backing maps from 0 to n, we need
+          //      to put the memcache first, the newest history second, ..., etc.
+          backingMaps[0] = memcache;
+          for(int i = history.size() - 1; i > 0; i--) {
+            backingMaps[i] = history.get(i);
+          }
+
+          this.keyIterators = new Iterator[backingMaps.length];
+          this.keys = new HStoreKey[backingMaps.length];
+          this.vals = new byte[backingMaps.length][];
+
+          // Generate list of iterators
+          HStoreKey firstKey = new HStoreKey(firstRow);
+          for(int i = 0; i < backingMaps.length; i++) {
+            keyIterators[i] = (firstRow != null && firstRow.getLength() != 0) ?
+              backingMaps[i].tailMap(firstKey).keySet().iterator() :
+              backingMaps[i].keySet().iterator();
 
-        // Generate list of iterators
-        HStoreKey firstKey = new HStoreKey(firstRow);
-        for(int i = 0; i < backingMaps.length; i++) {
-          keyIterators[i] = (/*firstRow != null &&*/ firstRow.getLength() != 0)?
-            backingMaps[i].tailMap(firstKey).keySet().iterator():
-            backingMaps[i].keySet().iterator();
-          while(getNext(i)) {
-            if(! findFirstRow(i, firstRow)) {
-              continue;
-            }
-            if(columnMatch(i)) {
-              break;
-            }
+            while(getNext(i)) {
+              if(! findFirstRow(i, firstRow)) {
+                continue;
+              }
+              if(columnMatch(i)) {
+                break;
+              }
+            }
           }
         }
       } catch (RuntimeException ex) {
Index: src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegionServer.java
===================================================================
--- src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegionServer.java	(revision 575373)
+++ src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegionServer.java	(working copy)
@@ -676,8 +676,10 @@
           if (LOG.isDebugEnabled()) {
             LOG.debug("Got call server startup message");
           }
-          closeAllRegions();
-          restart = true;
+          if (fsOk) {
+            closeAllRegions();
+            restart = true;
+          }
           break;
 
         case HMsg.MSG_REGIONSERVER_STOP:
@@ -689,10 +691,12 @@
           break;
 
         default:
-          try {
-            toDo.put(new ToDoEntry(msgs[i]));
-          } catch (InterruptedException e) {
-            throw new RuntimeException("Putting into msgQueue was interrupted.", e);
+          if (fsOk) {
+            try {
+              toDo.put(new ToDoEntry(msgs[i]));
+            } catch (InterruptedException e) {
+              throw new RuntimeException("Putting into msgQueue was interrupted.", e);
+            }
           }
         }
       }
@@ -747,20 +751,24 @@
     }
 
     if (abortRequested) {
-      try {
-        log.close();
-        LOG.info("On abort, closed hlog");
-      } catch (IOException e) {
-        if (e instanceof RemoteException) {
-          try {
-            e = RemoteExceptionHandler.decodeRemoteException((RemoteException) e);
-          } catch (IOException ex) {
-            e = ex;
+      if (fsOk) {
+        // Only try to clean up if the file system is available
+
+        try {
+          log.close();
+          LOG.info("On abort, closed hlog");
+        } catch (IOException e) {
+          if (e instanceof RemoteException) {
+            try {
+              e = RemoteExceptionHandler.decodeRemoteException((RemoteException) e);
+            } catch (IOException ex) {
+              e = ex;
+            }
           }
+          LOG.error("Unable to close log in abort", e);
         }
-        LOG.error("Unable to close log in abort", e);
+        closeAllRegions(); // Don't leave any open file handles
       }
-      closeAllRegions(); // Don't leave any open file handles
       LOG.info("aborting server at: " + serverInfo.getServerAddress().toString());
     } else {