Index: src/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
===================================================================
--- src/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java	(revision 929708)
+++ src/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java	(working copy)
@@ -222,7 +222,25 @@
    * not flushed.
    */
   private boolean flushRegion(HRegion region, boolean removeFromQueue) {
-    checkStoreFileCount(region);
+    // If removeFromQueue, then we come from flushSomeRegions and we need
+    // to block if there are too many store files. Else, we don't want to hang
+    // the main flushing thread, so we'll just put the region back at the end
+    // of the queue if there are too many files.
+    if (removeFromQueue) {
+      checkStoreFileCount(region);
+    } else if (isTooManyStoreFiles(region)) {
+      LOG.warn("Region " + region.getRegionNameAsString() + " has too many " +
+          "store files, putting it back at the end of the flush queue.");
+      server.compactSplitThread.compactionRequested(region, getName());
+      // If there's only this item in the queue or they are all in this
+      // situation, we will loop a lot. Sleep a bit.
+      try {
+        Thread.sleep(500);
+      } catch (InterruptedException e) { } // just continue
+      flushQueue.add(region);
+      // Tell a lie: it's not flushed, but that's ok.
+      return true;
+    }
     synchronized (regionsInQueue) {
       // See comment above for removeFromQueue on why we do not
       // take the region out of the set. If removeFromQueue is true, remove it
@@ -310,6 +328,15 @@
     }
   }
 
+  private boolean isTooManyStoreFiles(HRegion region) {
+    for (Store hstore: region.stores.values()) {
+      if (hstore.getStorefilesCount() > this.blockingStoreFilesNumber) {
+        return true;
+      }
+    }
+    return false;
+  }
+
   /**
    * Check if the regionserver's memstore memory usage is greater than the
    * limit. If so, flush regions with the biggest memstores until we're down
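
The non-blocking path of the patch follows a defer-and-requeue pattern: a blocked item is pushed back to the tail of the queue, the worker sleeps briefly to avoid busy-spinning when every queued item is blocked, and the call reports success so the caller keeps draining. Below is a minimal, self-contained sketch of that pattern, not HBase code; the names (Task, isBlocked, process, blockingThreshold) are hypothetical stand-ins for the region, isTooManyStoreFiles, the actual flush, and blockingStoreFilesNumber.

    import java.util.ArrayDeque;
    import java.util.Queue;

    public class DeferringWorker {

      static final class Task {
        final String name;
        int pendingFiles;        // stands in for the region's store file count

        Task(String name, int pendingFiles) {
          this.name = name;
          this.pendingFiles = pendingFiles;
        }
      }

      private final int blockingThreshold = 7;      // stands in for blockingStoreFilesNumber
      private final Queue<Task> queue = new ArrayDeque<>();

      /** Returns true when the task was handled (or deliberately deferred). */
      boolean handle(Task task) {
        if (isBlocked(task)) {
          // Don't hang the worker thread: push the task to the back of the
          // queue and sleep briefly so a queue full of blocked tasks does
          // not busy-spin.
          System.out.println(task.name + " is blocked, re-queueing it.");
          try {
            Thread.sleep(500);
          } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
          }
          queue.add(task);
          // "Tell a lie": report success so the caller keeps draining the queue.
          return true;
        }
        return process(task);
      }

      private boolean isBlocked(Task task) {
        return task.pendingFiles > blockingThreshold;
      }

      private boolean process(Task task) {
        System.out.println("Processing " + task.name);
        return true;
      }

      public static void main(String[] args) {
        DeferringWorker worker = new DeferringWorker();
        worker.queue.add(new Task("region-a", 3));
        worker.queue.add(new Task("region-b", 12));  // over the threshold: deferred once
        // Drain a few items; in the real patch this is the flusher's run loop.
        for (int i = 0; i < 3 && !worker.queue.isEmpty(); i++) {
          Task t = worker.queue.poll();
          worker.handle(t);
          t.pendingFiles = 0;  // pretend a compaction cleaned up before the retry
        }
      }
    }

The 500 ms sleep plays the same role in both the sketch and the patch: without it, a queue consisting entirely of blocked items would spin through requeue cycles at full speed while waiting on compactions that run on another thread.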