diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
index 3b023f0..befeb88 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
@@ -55,6 +55,7 @@ import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.NavigableMap;
+import java.util.Random;
import java.util.TreeMap;
import java.util.UUID;
import java.util.concurrent.atomic.AtomicBoolean;
@@ -4927,6 +4928,57 @@ public class TestHRegion {
}
}
+ /**
+ * From https://groups.google.com/d/msg/opentsdb/_filFPXRmMs/EgYdVY8wuokJ
+ *
+ * """
+ * We found the root cause.
+ * 1. We use milliseconds in the opentsdb metrics, which may generate over 32000 metrics in
+ * one hour. Each column of millisecond metrics uses 4 bytes; during compaction, the
+ * aggregated column size may exceed 128KB (hfile.index.block.max.size).
+ * 2. If the size of ( rowkey + columnfamily:qualifier ) > hfile.index.block.max.size, this may
+ * cause the memstore flush to loop infinitely while writing the hfile index.
+ *
+ * That's why the compaction hangs, the tmp folder of the regions on HDFS grows all the
+ * time, and the region server eventually goes down.
+ * """
+ */
+ @Test
+ public void testTSDBDenseColumn() throws IOException {
+ final Configuration conf = TEST_UTIL.getConfiguration();
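+ // hfile.index.block.max.size caps the size of a single hfile index block (128KB by default);
+ // hbase.client.keyvalue.maxsize caps the size of a single KeyValue (10MB by default).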
+ final int maxIndexSize = conf.getInt("hfile.index.block.max.size", 128 * 1024);
+ final int maxKeyValueSize = conf.getInt("hbase.client.keyvalue.maxsize", 10 * 1024 * 1024);
+ final String method = name.getMethodName();
+ final byte[] tableName = Bytes.toBytes(method);
+ final byte[] family = new byte[Byte.MAX_VALUE];
+ final Random rand = new Random(1003589);
+
+ // generate a random Byte.MAX_VALUE-length family name of uppercase ASCII letters ('A'-'Z')
+ for (int i = 0; i < family.length; i++) {
+ family[i] = (byte) ('A' + rand.nextInt(26));
+ }
+
+ this.region = initHRegion(tableName, method, family);
+
+ // insert Cells that are each far larger than maxIndexSize.
+ byte[] k = new byte[Short.MAX_VALUE]; // largest possible rowkey
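+ // size the value so that rowkey + value together add up to exactly maxKeyValueSize (~10MB)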
+ byte[] v = new byte[maxKeyValueSize - Short.MAX_VALUE];
+ rand.nextBytes(k);
+ rand.nextBytes(v);
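+ // write 15 such cells to the same row, one single-byte qualifier per column, modelling an
+ // OpenTSDB-style dense row as described in the quote above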
+ for (int i = 0; i < 15 /* 15 * ~10MB KVs = ~150MB to flush */; i++) {
+ KeyValue kv = new KeyValue(k, family, new byte[] { (byte) i }, 0, v);
+ Put p = new Put(k);
+ p.add(kv);
+ this.region.put(p);
+ }
+
+ LOG.info("region memstore size: " + this.region.getMemstoreSize());
+ LOG.info("store memstore size: " + this.region.getStore(family).getMemStoreSize());
+
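+ // Reproduce the scenario above: before the fix, this flush could loop forever while writing
+ // the hfile index for such oversized cells; assert that it now completes successfully.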
+ Region.FlushResult fr = this.region.flush(true);
+ assertTrue(fr.isFlushSucceeded());
+ }
+
private Configuration initSplit() {
// Always compact if there is more than one store file.
CONF.setInt("hbase.hstore.compactionThreshold", 2);