Index: src/contrib/hbase/src/java/org/apache/hadoop/hbase/HBaseConfiguration.java
===================================================================
--- src/contrib/hbase/src/java/org/apache/hadoop/hbase/HBaseConfiguration.java (revision 575373)
+++ src/contrib/hbase/src/java/org/apache/hadoop/hbase/HBaseConfiguration.java (working copy)
@@ -28,7 +28,7 @@
   /** constructor */
   public HBaseConfiguration() {
     super();
-    addDefaultResource("hbase-default.xml");
-    addDefaultResource("hbase-site.xml");
+    addResource("hbase-default.xml");
+    addResource("hbase-site.xml");
   }
 }
Index: src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMaster.java
===================================================================
--- src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMaster.java (revision 575373)
+++ src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMaster.java (working copy)
@@ -1145,6 +1145,11 @@
    * by remote region servers have expired.
    */
   private void letRegionServersShutdown() {
+    if (!fsOk) {
+      // Forget waiting for the region servers if the file system has gone
+      // away. Just exit as quickly as possible.
+      return;
+    }
     synchronized (serversToServerInfo) {
       while (this.serversToServerInfo.size() > 0) {
         LOG.info("Waiting on following regionserver(s) to go down (or " +
Index: src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMemcache.java
===================================================================
--- src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMemcache.java (revision 575373)
+++ src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMemcache.java (working copy)
@@ -26,7 +26,6 @@
 import java.util.Map;
 import java.util.SortedMap;
 import java.util.TreeMap;
-import java.util.Vector;
 import java.util.concurrent.atomic.AtomicLong;

 import org.apache.commons.logging.Log;
@@ -40,10 +39,13 @@
  */
 public class HMemcache {
   static final Log LOG = LogFactory.getLog(HMemcache.class);
-  TreeMap<HStoreKey, byte []> memcache =
-    new TreeMap<HStoreKey, byte []>();
-  final Vector<TreeMap<HStoreKey, byte []>> history
-    = new Vector<TreeMap<HStoreKey, byte []>>();
+
+  // Note that since these structures are always accessed with a lock held,
+  // no additional synchronization is required.
+
+  TreeMap<HStoreKey, byte []> memcache = new TreeMap<HStoreKey, byte []>();
+  final ArrayList<TreeMap<HStoreKey, byte []>> history =
+    new ArrayList<TreeMap<HStoreKey, byte []>>();
   TreeMap<HStoreKey, byte []> snapshot = null;

   final HLocking lock = new HLocking();
@@ -124,7 +126,8 @@
       throw new IOException("Snapshot not present!");
     }
     for (Iterator<TreeMap<HStoreKey, byte []>> it = history.iterator();
-        it.hasNext();) {
+        it.hasNext(); ) {
+
       TreeMap<HStoreKey, byte []> cur = it.next();
       if (snapshot == cur) {
         it.remove();
@@ -183,10 +186,11 @@
           break;
         }
         results.addAll(results.size(),
-          get(history.elementAt(i), key, numVersions - results.size()));
+          get(history.get(i), key, numVersions - results.size()));
       }
-      return (results.size() == 0)?
-        null: ImmutableBytesWritable.toArray(results);
+      return (results.size() == 0) ? null :
+        ImmutableBytesWritable.toArray(results);
+
     } finally {
       this.lock.releaseReadLock();
     }
@@ -205,8 +209,8 @@
     this.lock.obtainReadLock();
     try {
       internalGetFull(memcache, key, results);
-      for (int i = history.size()-1; i >= 0; i--) {
-        TreeMap<HStoreKey, byte []> cur = history.elementAt(i);
+      for (int i = history.size() - 1; i >= 0; i--) {
+        TreeMap<HStoreKey, byte []> cur = history.get(i);
         internalGetFull(cur, key, results);
       }
       return results;
@@ -285,9 +289,9 @@
     try {
       List<HStoreKey> results = getKeys(this.memcache, origin, versions);
       for (int i = history.size() - 1; i >= 0; i--) {
-        results.addAll(results.size(), getKeys(history.elementAt(i), origin,
-          versions == HConstants.ALL_VERSIONS? versions:
-          (results != null? versions - results.size(): versions)));
+        results.addAll(results.size(), getKeys(history.get(i), origin,
+          versions == HConstants.ALL_VERSIONS ? versions :
+          (versions - results.size())));
       }
       return results;
     } finally {
@@ -345,9 +349,15 @@
   /**
    * Return a scanner over the keys in the HMemcache
    */
   HInternalScannerInterface getScanner(long timestamp,
-      Text targetCols[], Text firstRow)
-  throws IOException {
-    return new HMemcacheScanner(timestamp, targetCols, firstRow);
+      Text targetCols[], Text firstRow) throws IOException {
+
+    this.lock.obtainReadLock();
+    try {
+      return new HMemcacheScanner(timestamp, targetCols, firstRow);
+
+    } finally {
+      this.lock.releaseReadLock();
+    }
   }

 //////////////////////////////////////////////////////////////////////////////
@@ -361,20 +371,20 @@

     @SuppressWarnings("unchecked")
     HMemcacheScanner(final long timestamp, final Text targetCols[],
-        final Text firstRow)
-    throws IOException {
+        final Text firstRow) throws IOException {
+
       super(timestamp, targetCols);
-      lock.obtainReadLock();
       try {
         this.backingMaps = new TreeMap[history.size() + 1];
-
-        //NOTE: Since we iterate through the backing maps from 0 to n, we need
-        // to put the memcache first, the newest history second, ..., etc.
+
+        // Note that since we iterate through the backing maps from 0 to n, we
+        // need to put the memcache first, the newest history second, ..., etc.
+
        backingMaps[0] = memcache;
-        for(int i = history.size() - 1; i > 0; i--) {
-          backingMaps[i] = history.elementAt(i);
+        for (int i = history.size() - 1; i >= 0; i--) {
+          backingMaps[history.size() - i] = history.get(i);
         }
-
+
         this.keyIterators = new Iterator[backingMaps.length];
         this.keys = new HStoreKey[backingMaps.length];
         this.vals = new byte[backingMaps.length][];
@@ -380,16 +390,18 @@
       this.vals = new byte[backingMaps.length][];
       // Generate list of iterators
+
       HStoreKey firstKey = new HStoreKey(firstRow);
-      for(int i = 0; i < backingMaps.length; i++) {
-        keyIterators[i] = (/*firstRow != null &&*/ firstRow.getLength() != 0)?
-            backingMaps[i].tailMap(firstKey).keySet().iterator():
-            backingMaps[i].keySet().iterator();
-        while(getNext(i)) {
-          if(! findFirstRow(i, firstRow)) {
+      for (int i = 0; i < backingMaps.length; i++) {
+        keyIterators[i] = firstRow.getLength() != 0 ?
+          backingMaps[i].tailMap(firstKey).keySet().iterator() :
+          backingMaps[i].keySet().iterator();
+
+        while (getNext(i)) {
+          if (!findFirstRow(i, firstRow)) {
             continue;
           }
-          if(columnMatch(i)) {
+          if (columnMatch(i)) {
             break;
           }
         }
Index: src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegionServer.java
===================================================================
--- src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegionServer.java (revision 575373)
+++ src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegionServer.java (working copy)
@@ -676,8 +676,10 @@
           if (LOG.isDebugEnabled()) {
             LOG.debug("Got call server startup message");
           }
-          closeAllRegions();
-          restart = true;
+          if (fsOk) {
+            closeAllRegions();
+            restart = true;
+          }
           break;

         case HMsg.MSG_REGIONSERVER_STOP:
@@ -689,10 +691,12 @@
           break;

         default:
-          try {
-            toDo.put(new ToDoEntry(msgs[i]));
-          } catch (InterruptedException e) {
-            throw new RuntimeException("Putting into msgQueue was interrupted.", e);
+          if (fsOk) {
+            try {
+              toDo.put(new ToDoEntry(msgs[i]));
+            } catch (InterruptedException e) {
+              throw new RuntimeException("Putting into msgQueue was interrupted.", e);
+            }
           }
         }
       }
@@ -747,20 +751,24 @@
     }

     if (abortRequested) {
-      try {
-        log.close();
-        LOG.info("On abort, closed hlog");
-      } catch (IOException e) {
-        if (e instanceof RemoteException) {
-          try {
-            e = RemoteExceptionHandler.decodeRemoteException((RemoteException) e);
-          } catch (IOException ex) {
-            e = ex;
+      if (fsOk) {
+        // Only try to clean up if the file system is available
+
+        try {
+          log.close();
+          LOG.info("On abort, closed hlog");
+        } catch (IOException e) {
+          if (e instanceof RemoteException) {
+            try {
+              e = RemoteExceptionHandler.decodeRemoteException((RemoteException) e);
+            } catch (IOException ex) {
+              e = ex;
+            }
           }
+          LOG.error("Unable to close log in abort", e);
         }
-        LOG.error("Unable to close log in abort", e);
+        closeAllRegions(); // Don't leave any open file handles
       }
-      closeAllRegions(); // Don't leave any open file handles
       LOG.info("aborting server at: " +
         serverInfo.getServerAddress().toString());
     } else {
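
Note for reviewers (not part of the patch): the recurring fsOk guard above assumes
that some earlier probe has already recorded whether DFS is still reachable, so the
shutdown and abort paths can skip file-system cleanup instead of hanging. Below is a
minimal, self-contained sketch of that pattern. The class name FsGuardSketch and the
probe method checkFileSystem() are illustrative assumptions, not code from this
patch; only the org.apache.hadoop.fs.FileSystem calls are real Hadoop APIs.

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    /** Sketch of the fsOk pattern used by HMaster and HRegionServer above. */
    public class FsGuardSketch {

      private final FileSystem fs;          // DFS handle shared by worker threads
      private volatile boolean fsOk = true; // flips to false once DFS goes away

      public FsGuardSketch(Configuration conf) throws IOException {
        this.fs = FileSystem.get(conf);
      }

      /** Cheap liveness probe; call after any suspicious IOException. */
      public void checkFileSystem() {
        try {
          fs.exists(new Path("/")); // any trivial DFS round trip will do
        } catch (IOException e) {
          fsOk = false; // remember the failure; do not touch DFS again
        }
      }

      /** Shutdown path mirroring letRegionServersShutdown() and abort handling. */
      public void shutdown() {
        if (!fsOk) {
          return; // file system is gone: exit quickly, skip log close and flushes
        }
        // ... close write-ahead log, close regions, release file handles ...
      }
    }

The property that matters, and that the hunks above enforce, is that every
DFS-touching cleanup step is fenced by the same flag, so a dead file system degrades
to a fast exit rather than a hang.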