Index: conf/hbase-default.xml
===================================================================
--- conf/hbase-default.xml (revision 685639)
+++ conf/hbase-default.xml (working copy)
@@ -245,6 +245,15 @@
     </description>
   </property>
   <property>
+    <name>hbase.hstore.compaction.max</name>
+    <value>402653184</value>
+    <description>Maximum total size of HStoreFiles to run compaction on at one time.
+    This keeps the region server from OOMEing when a compaction overruns available heap.
+    Should be lower than (HBASE_HEAPSIZE - hbase.regionserver.globalMemcacheLimit).
+    Default: 384MB.
+    </description>
+  </property>
+  <property>
     <name>hbase.regionserver.thread.splitcompactcheckfrequency</name>
     <value>20000</value>
     <description>How often a region server runs the split/compaction check.
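
The sizing rule in the description is simple arithmetic, and the sketch below checks the new default against it. This is commentary, not part of the patch: the 1 GB heap (HBASE_HEAPSIZE=1000) and the 512 MB global memcache limit are assumed deployment values, and the class name is made up for illustration.

// Illustrative sizing check for hbase.hstore.compaction.max; all values assumed.
public class CompactionMaxSizing {
  public static void main(String[] args) {
    long heapBytes = 1024L * 1024 * 1024;     // HBASE_HEAPSIZE=1000, ~1 GB (assumed)
    long memcacheLimit = 512L * 1024 * 1024;  // hbase.regionserver.globalMemcacheLimit (assumed)
    long ceiling = heapBytes - memcacheLimit; // the "lower than" bound from the description
    long defaultMax = 402653184L;             // the 384 MB default shipped in this patch
    System.out.println("ceiling=" + ceiling + " default=" + defaultMax
        + " withinBound=" + (defaultMax < ceiling));
  }
}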
Index: src/java/org/apache/hadoop/hbase/regionserver/HStore.java
===================================================================
--- src/java/org/apache/hadoop/hbase/regionserver/HStore.java (revision 685639)
+++ src/java/org/apache/hadoop/hbase/regionserver/HStore.java (working copy)
@@ -89,7 +89,8 @@
   private final long desiredMaxFileSize;
   private volatile long storeSize;
-
+  private long maxCompactionSize; // HBASE-775: cap on total size of files compacted at once
+
   private final Integer flushLock = new Integer(0);
   final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
@@ -178,6 +179,10 @@
     // MIN_COMMITS_FOR_COMPACTION map files
     this.compactionThreshold =
       conf.getInt("hbase.hstore.compactionThreshold", 3);
+
+    // HBASE-775: cap the total size compacted at once so a compaction cannot OOME us
+    this.maxCompactionSize =
+      conf.getLong("hbase.hstore.compaction.max", 402653184L); // 384MB

     // By default we split region if a file > DEFAULT_MAX_FILE_SIZE.
     long maxFileSize = info.getTableDesc().getMaxFileSize();
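
Deployments running with smaller heaps can lower the cap in hbase-site.xml, which overrides hbase-default.xml; the 256 MB value here is purely illustrative:

  <property>
    <name>hbase.hstore.compaction.max</name>
    <value>268435456</value>
  </property>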
@@ -776,11 +781,14 @@
       // The rule is: if the largest(oldest) one is more than twice the
       // size of the second, skip the largest, and continue to next...,
       // until we meet the compactionThreshold limit.
+
+      long compactionSize = totalSize; // running total of the files still selected
       for (point = 0; point < compactionThreshold - 1; point++) {
-        if (fileSizes[point] < fileSizes[point + 1] * 2) {
+        if (fileSizes[point] < fileSizes[point + 1] * 2 && compactionSize < maxCompactionSize) {
           break;
         }
         skipped += fileSizes[point];
+        compactionSize -= fileSizes[point]; // the skipped file no longer counts against the cap
       }
       filesToCompact = new ArrayList(filesToCompact.subList(point,
         countOfFiles));
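
To see what the new guard changes, here is a minimal, self-contained sketch of the skip loop under an assumed set of file sizes; it mirrors the patched logic but is simplified from HStore (class name and sizes are hypothetical):

// Standalone demo of the patched selection loop; sizes are made up.
public class CompactionSelectionDemo {
  public static void main(String[] args) {
    long[] fileSizes = {500L << 20, 120L << 20, 100L << 20, 90L << 20}; // largest/oldest first
    int compactionThreshold = 3;
    long maxCompactionSize = 384L << 20;
    long compactionSize = 0;
    for (long size : fileSizes) {
      compactionSize += size; // plays the role of totalSize in HStore
    }
    int point;
    for (point = 0; point < compactionThreshold - 1; point++) {
      // Stop skipping once the largest file is no longer an outlier AND
      // what remains fits under maxCompactionSize.
      if (fileSizes[point] < fileSizes[point + 1] * 2
          && compactionSize < maxCompactionSize) {
        break;
      }
      compactionSize -= fileSizes[point]; // drop the largest from this compaction
    }
    // Files at indexes [point, length) would be compacted together.
    // With these sizes: the 500 MB file is skipped, leaving 310 MB across three files.
    System.out.println("compact from index " + point + ", total "
        + (compactionSize >> 20) + "MB");
  }
}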