Index: conf/hbase-default.xml
===================================================================
--- conf/hbase-default.xml (revision 885113)
+++ conf/hbase-default.xml (working copy)
@@ -137,6 +137,17 @@
+  <property>
+    <name>hbase.client.keyvalue.maxsize</name>
+    <value>-1</value>
+    <description>Specifies the combined maximum allowed size of a KeyValue
+    instance. This sets an upper boundary for a single entry saved in a
+    storage file. Since entries cannot be split, this helps to avoid that
+    a region cannot be split any further because the data is too large.
+    It seems wise to set this to a fraction of the maximum region size.
+    Setting it to zero or less disables the check.
+    </description>
+  </property>
   <property>
     <name>hbase.regionserver.lease.period</name>
     <value>60000</value>
     <description>HRegion server lease period in milliseconds. Default is
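[Note] For illustration, a client could lower the limit below the shipped default and handle the resulting rejection in application code. A minimal sketch, assuming the table "mytable" and family "cf" already exist (both names are hypothetical) and using the client API this patch targets:

  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.client.HTable;
  import org.apache.hadoop.hbase.client.Put;
  import org.apache.hadoop.hbase.util.Bytes;

  public class MaxKeyValueSizeExample {
    public static void main(String[] args) throws Exception {
      HBaseConfiguration conf = new HBaseConfiguration();
      // Reject any single KeyValue larger than 1 MB before it leaves the client.
      conf.setInt("hbase.client.keyvalue.maxsize", 1 * 1024 * 1024);

      HTable table = new HTable(conf, "mytable");      // hypothetical table
      Put put = new Put(Bytes.toBytes("row1"));
      put.add(Bytes.toBytes("cf"), Bytes.toBytes("q"), new byte[2 * 1024 * 1024]);
      try {
        table.put(put);                                // exceeds the configured limit
      } catch (IllegalArgumentException e) {
        // "KeyValue size too large" -- thrown by validatePut() before any RPC is made
        System.err.println(e.getMessage());
      }
    }
  }

With the default of -1 the same put is accepted, since zero or less disables the check.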
Index: src/java/org/apache/hadoop/hbase/client/HTable.java
===================================================================
--- src/java/org/apache/hadoop/hbase/client/HTable.java (revision 885113)
+++ src/java/org/apache/hadoop/hbase/client/HTable.java (working copy)
@@ -36,6 +36,7 @@
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.HServerAddress;
import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.NotServingRegionException;
import org.apache.hadoop.hbase.UnknownScannerException;
import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitor;
@@ -62,6 +63,7 @@
private boolean autoFlush;
private long currentWriteBufferSize;
protected int scannerCaching;
+ private int maxKeyValueSize;
/**
* Creates an object to access a HBase table
@@ -121,6 +123,7 @@
this.autoFlush = true;
this.currentWriteBufferSize = 0;
this.scannerCaching = conf.getInt("hbase.client.scanner.caching", 1);
+ this.maxKeyValueSize = conf.getInt("hbase.client.keyvalue.maxsize", -1);
}
/**
@@ -602,9 +605,18 @@
* @throws IllegalArgumentException
*/
   private void validatePut(final Put put) throws IllegalArgumentException{
-    if(put.isEmpty()) {
+    if (put.isEmpty()) {
       throw new IllegalArgumentException("No columns to insert");
     }
+    if (maxKeyValueSize > 0) {
+      for (List<KeyValue> list : put.getFamilyMap().values()) {
+        for (KeyValue kv : list) {
+          if (kv.getLength() > maxKeyValueSize) {
+            throw new IllegalArgumentException("KeyValue size too large");
+          }
+        }
+      }
+    }
   }
/**
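[Note] The check compares KeyValue.getLength(), i.e. the full serialized entry (length prefixes, row, family, qualifier, timestamp and type plus the value), not just the value bytes. The sketch below estimates that combined size; the layout constants reflect the KeyValue wire format as assumed here and are illustrative, not an authoritative API:

  public class KeyValueSizeEstimate {
    // Assumed layout: int keylen, int vallen, short rowlen, row, byte famlen,
    // family, qualifier, long timestamp, byte type, value.
    static long approxKeyValueSize(byte[] row, byte[] family,
        byte[] qualifier, byte[] value) {
      long keyLength = 2 + row.length   // short row length + row bytes
          + 1 + family.length           // byte family length + family bytes
          + qualifier.length            // qualifier bytes
          + 8                           // long timestamp
          + 1;                          // byte key type (e.g. Put)
      return 4 + 4 + keyLength + value.length;  // two int length prefixes up front
    }

    public static void main(String[] args) {
      byte[] value = new byte[4 * 1024 * 1024];
      long size = approxKeyValueSize("row1".getBytes(), "cf".getBytes(),
          "qual".getBytes(), value);
      // The key overhead means a value just under the configured maximum
      // can still push the combined size over the limit.
      System.out.println("approximate KeyValue size: " + size);
    }
  }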
Index: src/test/org/apache/hadoop/hbase/client/TestFromClientSide.java
===================================================================
--- src/test/org/apache/hadoop/hbase/client/TestFromClientSide.java (revision 885113)
+++ src/test/org/apache/hadoop/hbase/client/TestFromClientSide.java (working copy)
@@ -22,6 +22,7 @@
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
import java.io.IOException;
import java.util.ArrayList;
@@ -32,6 +33,7 @@
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
@@ -39,8 +41,6 @@
import org.apache.hadoop.hbase.HServerAddress;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.TableExistsException;
-import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.filter.BinaryComparator;
import org.apache.hadoop.hbase.filter.CompareFilter;
import org.apache.hadoop.hbase.filter.Filter;
@@ -401,6 +401,28 @@
}
@Test
+  public void testMaxKeyValueSize() throws Exception {
+    byte [] TABLE = Bytes.toBytes("testMaxKeyValueSize");
+    HBaseConfiguration conf = TEST_UTIL.getConfiguration();
+    String oldMaxSize = conf.get("hbase.client.keyvalue.maxsize");
+    HTable ht = TEST_UTIL.createTable(TABLE, FAMILY);
+    byte[] value = new byte[4 * 1024 * 1024];
+    Put put = new Put(ROW);
+    put.add(FAMILY, QUALIFIER, value);
+    ht.put(put);
+    try {
+      conf.setInt("hbase.client.keyvalue.maxsize", 2 * 1024 * 1024);
+      TABLE = Bytes.toBytes("testMaxKeyValueSize2");
+      ht = TEST_UTIL.createTable(TABLE, FAMILY);
+      put = new Put(ROW);
+      put.add(FAMILY, QUALIFIER, value);
+      ht.put(put);
+      fail("Inserting a too large KeyValue worked, should throw exception");
+    } catch(Exception e) {}
+    conf.set("hbase.client.keyvalue.maxsize", oldMaxSize);
+  }
+
+ @Test
public void testFilters() throws Exception {
byte [] TABLE = Bytes.toBytes("testFilters");
HTable ht = TEST_UTIL.createTable(TABLE, FAMILY);
@@ -857,13 +879,13 @@
// Null table name (should NOT work)
try {
TEST_UTIL.createTable(null, FAMILY);
- throw new IOException("Creating a table with null name passed, should have failed");
+ fail("Creating a table with null name passed, should have failed");
} catch(Exception e) {}
// Null family (should NOT work)
try {
TEST_UTIL.createTable(TABLE, (byte[])null);
- throw new IOException("Creating a table with a null family passed, should fail");
+ fail("Creating a table with a null family passed, should fail");
} catch(Exception e) {}
HTable ht = TEST_UTIL.createTable(TABLE, FAMILY);
@@ -873,7 +895,7 @@
Put put = new Put((byte[])null);
put.add(FAMILY, QUALIFIER, VALUE);
ht.put(put);
- throw new IOException("Inserting a null row worked, should throw exception");
+ fail("Inserting a null row worked, should throw exception");
} catch(Exception e) {}
// Null qualifier (should work)