Index: src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactSelection.java =================================================================== --- src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactSelection.java (revision 1240289) +++ src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactSelection.java (working copy) @@ -196,6 +196,8 @@ // don't exceed max file compact threshold assertEquals(maxFiles, store.compactSelection(sfCreate(7,6,5,4,3,2,1)).getFilesToCompact().size()); + // note: file selection starts with largest to smallest. + compactEquals(store.compactSelection(sfCreate(7,6,5,4,3,2,1)).getFilesToCompact(), 7,6,5,4,3); /* MAJOR COMPACTION */ // if a major compaction has been forced, then compact everything @@ -208,6 +210,7 @@ store.forceMajor = true; assertEquals(maxFiles, store.compactSelection(sfCreate(7,6,5,4,3,2,1)).getFilesToCompact().size()); + compactEquals(store.compactSelection(sfCreate(7,6,5,4,3,2,1)).getFilesToCompact(), 7,6,5,4,3); store.forceMajor = false; // if we exceed maxCompactSize, downgrade to minor @@ -229,6 +232,8 @@ // reference files should obey max file compact to avoid OOM assertEquals(maxFiles, store.compactSelection(sfCreate(true, 7,6,5,4,3,2,1)).getFilesToCompact().size()); + // note: file selection starts with smallest to largest. + compactEquals(store.compactSelection(sfCreate(true, 7,6,5,4,3,2,1)).getFilesToCompact(), 5,4,3,2,1); // empty case compactEquals(new ArrayList() /* empty */);