diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 681dd3f..f4eaac7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -233,6 +233,10 @@ public class HRegion implements HeapSize { // , Writable{
     REPLAY_BATCH_MUTATE,
     COMPACT_REGION
   }
+  // Threads currently compacting a store of this region, so close() can
+  // interrupt their in-flight IO. Entries are added/removed by the compactor.
+  public final Map<Thread, Store> currentCompactions = Maps.newConcurrentMap();
+
   //////////////////////////////////////////////////////////////////////////////
   // Members
   //////////////////////////////////////////////////////////////////////////////
@@ -1109,6 +1113,11 @@ public class HRegion implements HeapSize { // , Writable{
       // region.
       writestate.writesEnabled = false;
       LOG.debug("Closing " + this + ": disabling compactions & flushes");
+      // Interrupt any IO currently in progress in compactions of this region,
+      // so close() does not have to wait behind long-running compactions.
+      for (Thread t : currentCompactions.keySet()) {
+        t.interrupt();
+      }
       waitForFlushesAndCompactions();
     }
     // If we were not just flushing, is it worth doing a preflush...one
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DefaultCompactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DefaultCompactor.java
index e04a715..e30427e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DefaultCompactor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DefaultCompactor.java
@@ -26,6 +26,7 @@ import java.util.List;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.regionserver.HStore;
 import org.apache.hadoop.hbase.regionserver.InternalScanner;
 import org.apache.hadoop.hbase.regionserver.ScanType;
 import org.apache.hadoop.hbase.regionserver.Store;
@@ -56,6 +57,8 @@ public class DefaultCompactor extends Compactor {
     List newFiles = new ArrayList();
     IOException e = null;
     try {
+      // Register so HRegion.close() can interrupt this compaction's IO.
+      ((HStore) store).getHRegion().currentCompactions.put(Thread.currentThread(), store);
       InternalScanner scanner = null;
       try {
         /* Include deletes, unless we are doing a major compaction */
@@ -102,6 +105,11 @@ public class DefaultCompactor extends Compactor {
           newFiles.add(writer.getPath());
         }
       }
+      // Deregister on every exit path (this runs inside the finally), then
+      // clear any interrupt that landed after the IO finished so the stale
+      // flag does not leak into the pool thread's next task. NOTE(review): a
+      // small window remains where close() interrupts us after the remove.
+      ((HStore) store).getHRegion().currentCompactions.remove(Thread.currentThread());
+      Thread.interrupted();
     }
     return newFiles;
   }