Index: src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
===================================================================
--- src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java	(revision 1147282)
+++ src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java	(working copy)
@@ -1290,7 +1290,8 @@
     try {
       LOG.info("" + addContent(region, fam3));
       region.flushcache();
-      byte [] splitRow = region.compactStores();
+      region.compactStores();
+      byte [] splitRow = region.checkSplit();
       assertNotNull(splitRow);
       LOG.info("SplitRow: " + Bytes.toString(splitRow));
       HRegion [] subregions = splitRegion(region, splitRow);
@@ -2295,7 +2296,8 @@
     try {
       LOG.info("" + addContent(region, fam3));
       region.flushcache();
-      byte [] splitRow = region.compactStores();
+      region.compactStores();
+      byte [] splitRow = region.checkSplit();
       assertNotNull(splitRow);
       LOG.info("SplitRow: " + Bytes.toString(splitRow));
       HRegion [] regions = splitRegion(region, splitRow);
@@ -2329,7 +2331,8 @@
     byte [][] midkeys = new byte [regions.length][];
     // To make regions splitable force compaction.
     for (int i = 0; i < regions.length; i++) {
-      midkeys[i] = regions[i].compactStores();
+      regions[i].compactStores();
+      midkeys[i] = regions[i].checkSplit();
     }
 
     TreeMap sortedMap = new TreeMap();
Index: src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorInterface.java
===================================================================
--- src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorInterface.java	(revision 1147282)
+++ src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorInterface.java	(working copy)
@@ -146,7 +146,11 @@
       addContent(region, fam3);
       region.flushcache();
     }
-    byte [] splitRow = region.compactStores();
+
+    region.compactStores();
+
+    byte [] splitRow = region.checkSplit();
+    assertNotNull(splitRow);
 
     HRegion [] regions = split(region, splitRow);
     for (int i = 0; i < regions.length; i++) {
Index: src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java	(revision 1147282)
+++ src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java	(working copy)
@@ -815,33 +815,27 @@
   }
 
   /**
-   * Called by compaction thread and after region is opened to compact the
-   * HStores if necessary.
-   *
-   * <p>This operation could block for a long time, so don't call it from a
-   * time-sensitive thread.
-   *
-   * Note that no locking is necessary at this level because compaction only
-   * conflicts with a region split, and that cannot happen because the region
-   * server does them sequentially and not in parallel.
+   * This is a helper function that compacts all the stores synchronously.
+   * It is used by utilities and testing.
    *
    * @param majorCompaction True to force a major compaction regardless of thresholds
-   * @return split row if split is needed
    * @throws IOException e
    */
-  byte [] compactStores(final boolean majorCompaction)
+  void compactStores(final boolean majorCompaction)
   throws IOException {
     if (majorCompaction) {
       this.triggerMajorCompaction();
     }
-    return compactStores();
+    compactStores();
   }
 
   /**
-   * Compact all the stores and return the split key of the first store that needs
-   * to be split.
+   * This is a helper function that compacts all the stores synchronously.
+   * It is used by utilities and testing.
+   *
+   * @throws IOException e
    */
-  public byte[] compactStores() throws IOException {
+  public void compactStores() throws IOException {
     for(Store s : getStores().values()) {
       CompactionRequest cr = s.requestCompaction();
       if(cr != null) {
@@ -851,12 +845,7 @@
           s.finishRequest(cr);
         }
       }
-      byte[] splitRow = s.checkSplit();
-      if (splitRow != null) {
-        return splitRow;
-      }
     }
-    return null;
   }
 
   /*
@@ -3829,7 +3818,7 @@
     // nothing
   }
 
-  byte[] checkSplit() {
+  public byte[] checkSplit() {
     if (this.splitPoint != null) {
       return this.splitPoint;
     }
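
With this patch, compactStores() no longer returns a split row: callers compact first and then ask the region for a split point via the now-public checkSplit(), which may return null when no store needs splitting. Below is a minimal sketch of the updated calling pattern, assuming an already-opened HRegion named region as in the tests above; the wrapper class and helper method names are illustrative only, not part of the patch.

import java.io.IOException;

import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.util.Bytes;

public class CompactThenSplitExample {
  // Illustrative helper: compact every store, then ask the region for a
  // split row instead of taking it from the return value of compactStores().
  static byte[] compactAndFindSplitRow(HRegion region) throws IOException {
    // Before this patch: byte [] splitRow = region.compactStores();
    region.compactStores();                 // now void: synchronously compacts all stores
    byte[] splitRow = region.checkSplit();  // now public: null if no store wants a split
    if (splitRow != null) {
      System.out.println("SplitRow: " + Bytes.toString(splitRow));
    }
    return splitRow;
  }
}

Separating the two concerns keeps compactStores() a plain synchronous compaction helper for utilities and tests, while split-point selection stays in checkSplit().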