Index: src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/regionserver/Store.java	(revision 1065506)
+++ src/main/java/org/apache/hadoop/hbase/regionserver/Store.java	(working copy)
@@ -725,17 +725,17 @@
    * @param dir
    * @throws IOException
    */
-  private static long getLowestTimestamp(FileSystem fs, 
+  private static long getLowestTimestamp(FileSystem fs,
       final List<StoreFile> candidates) throws IOException {
     long minTs = Long.MAX_VALUE;
     if (candidates.isEmpty()) {
-      return minTs; 
+      return minTs;
     }
     Path[] p = new Path[candidates.size()];
     for (int i = 0; i < candidates.size(); ++i) {
       p[i] = candidates.get(i).getPath();
     }
-    
+
     FileStatus[] stats = fs.listStatus(p);
     if (stats == null || stats.length == 0) {
       return minTs;
@@ -756,13 +756,13 @@
         return false;
       }
     }
-    
+
     List<StoreFile> candidates = new ArrayList<StoreFile>(this.storefiles);
     // exclude files above the max compaction threshold
     // except: save all references. we MUST compact them
     int pos = 0;
-    while (pos < candidates.size() && 
+    while (pos < candidates.size() &&
            candidates.get(pos).getReader().length() > this.maxCompactSize &&
            !candidates.get(pos).isReference()) ++pos;
     candidates.subList(0, pos).clear();
@@ -868,7 +868,7 @@
       // do not compact old files above a configurable threshold
       // save all references. we MUST compact them
       int pos = 0;
-      while (pos < filesToCompact.size() && 
+      while (pos < filesToCompact.size() &&
              filesToCompact.get(pos).getReader().length() > maxCompactSize &&
              !filesToCompact.get(pos).isReference()) ++pos;
       filesToCompact.subList(0, pos).clear();
@@ -878,7 +878,7 @@
         LOG.debug(this.storeNameStr + ": no store files to compact");
         return filesToCompact;
       }
-      
+
       // major compact on user action or age (caveat: we have too many files)
       boolean majorcompaction = (forcemajor || isMajorCompaction(filesToCompact))
         && filesToCompact.size() < this.maxFilesToCompact;
@@ -891,7 +891,7 @@
       int start = 0;
       double r = this.compactRatio;
 
-      /* TODO: add sorting + unit test back in when HBASE-2856 is fixed 
+      /* TODO: add sorting + unit test back in when HBASE-2856 is fixed
      // Sort files by size to correct when normal skew is altered by bulk load.
      Collections.sort(filesToCompact, StoreFile.Comparators.FILE_SIZE);
      */
@@ -1320,11 +1320,12 @@
     this.lock.readLock().lock();
     try {
       // sanity checks
-      if (!force) {
-        if (storeSize < this.desiredMaxFileSize || this.storefiles.isEmpty()) {
-          return null;
-        }
+      if (this.storefiles.isEmpty()) {
+        return null;
       }
+      if (!force && storeSize < this.desiredMaxFileSize) {
+        return null;
+      }
 
       if (this.region.getRegionInfo().isMetaRegion()) {
         if (force) {
Index: src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java
===================================================================
--- src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java	(revision 1065506)
+++ src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java	(working copy)
@@ -61,7 +61,7 @@
 import com.google.common.base.Joiner;
 
 /**
- * Test class fosr the Store
+ * Test class for the Store
 */
 public class TestStore extends TestCase {
   public static final Log LOG = LogFactory.getLog(TestStore.class);
@@ -630,4 +630,15 @@
     result = HBaseTestingUtility.getFromStoreFile(store, get);
     assertTrue(result.size()==0);
   }
+
+  /**
+   * Test for HBASE-3492 - Test split on empty colfam (no store files).
+   *
+   * @throws IOException When the IO operations fail.
+   */
+  public void testSplitWithEmptyColFam() throws IOException {
+    init(this.getName());
+    assertNull(store.checkSplit(false));
+    assertNull(store.checkSplit(true));
+  }
 }
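
Note (not part of the patch): the Store.java hunk at line 1320 reorders the sanity checks in checkSplit so that a column family with no store files never yields a split point, even when a split is forced; the size threshold is only consulted for non-forced requests. The standalone sketch below mirrors that decision order for illustration only; the names SplitCheckSketch, canSplit, storeFileCount, storeSize and desiredMaxFileSize are hypothetical and are not HBase API.

/** Standalone sketch of the split-eligibility ordering introduced by the patch (illustrative only). */
public class SplitCheckSketch {

  /** Mirrors the reordered sanity checks in Store.checkSplit; names are hypothetical. */
  static boolean canSplit(int storeFileCount, long storeSize,
                          long desiredMaxFileSize, boolean force) {
    // An empty column family can never provide a split point, even when forced.
    if (storeFileCount == 0) {
      return false;
    }
    // Without force, only split once the store has outgrown the configured maximum.
    if (!force && storeSize < desiredMaxFileSize) {
      return false;
    }
    return true;
  }

  public static void main(String[] args) {
    System.out.println(canSplit(0, 0L, 256L, true));    // false: no store files, force ignored
    System.out.println(canSplit(3, 100L, 256L, false)); // false: below threshold, not forced
    System.out.println(canSplit(3, 100L, 256L, true));  // true: forced split with store files
  }
}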