Index: hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
===================================================================
--- hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java (revision 1571124)
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java (working copy)
@@ -34,7 +34,6 @@
import java.util.TreeSet;
import java.util.regex.Matcher;
-import com.google.protobuf.HBaseZeroCopyByteString;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
@@ -54,6 +53,7 @@
import org.apache.hadoop.hbase.util.Writables;
import org.apache.hadoop.io.WritableComparable;
+import com.google.protobuf.HBaseZeroCopyByteString;
import com.google.protobuf.InvalidProtocolBufferException;
/**
@@ -655,9 +655,19 @@
}
/**
- * This get the class associated with the region split policy which
+ * This sets the class associated with the region split policy which
* determines when a region split should occur. The class used by
* default is defined in {@link org.apache.hadoop.hbase.regionserver.RegionSplitPolicy}
+ * @param clazz the fully qualified class name of the region split policy
+ */
+ public void setRegionSplitPolicyClassName(String clazz) {
+ setValue(SPLIT_POLICY, clazz);
+ }
+
+ /**
+ * This gets the class associated with the region split policy which
+ * determines when a region split should occur. The class used by
+ * default is defined in {@link org.apache.hadoop.hbase.regionserver.RegionSplitPolicy}
*
* @return the class name of the region split policy for this table.
* If this returns null, the default split policy is used.
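
A minimal usage sketch of the new setter (the table name, family, and admin
handle are illustrative only, not part of this patch):

    // Assumes an existing HBaseAdmin instance named "admin".
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("example"));
    htd.addFamily(new HColumnDescriptor("cf"));
    // Any concrete RegionSplitPolicy subclass name works here;
    // ConstantSizeRegionSplitPolicy ships with HBase.
    htd.setRegionSplitPolicyClassName(
        "org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy");
    admin.createTable(htd);
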
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java (revision 1571124)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java (working copy)
@@ -51,6 +51,7 @@
import org.apache.hadoop.hbase.Chore;
import org.apache.hadoop.hbase.ClusterId;
import org.apache.hadoop.hbase.ClusterStatus;
+import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HBaseIOException;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
@@ -210,6 +211,7 @@
import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse;
import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest;
import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse;
+import org.apache.hadoop.hbase.regionserver.RegionSplitPolicy;
import org.apache.hadoop.hbase.replication.regionserver.Replication;
import org.apache.hadoop.hbase.security.UserProvider;
import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
@@ -1742,7 +1744,7 @@
HRegionInfo[] newRegions = getHRegionInfos(hTableDescriptor, splitKeys);
checkInitialized();
- checkCompression(hTableDescriptor);
+ sanityCheckTableDescriptor(hTableDescriptor);
if (cpHost != null) {
cpHost.preCreateTable(hTableDescriptor, newRegions);
}
@@ -1756,6 +1758,79 @@
}
+ /**
+ * Checks whether the table descriptor conforms to some sane limits, and that
+ * configured values (compression, split policy, etc.) are valid.
+ * @throws IOException if the descriptor fails any sanity check
+ */
+ private void sanityCheckTableDescriptor(final HTableDescriptor htd) throws IOException {
+ // check max file size
+ long maxFileSizeLowerLimit = 16 * 1024 * 1024L; // 16M is the default lower limit
+ long maxFileSize = htd.getMaxFileSize();
+ if (maxFileSize < 0) {
+ maxFileSize = conf.getLong(HConstants.HREGION_MAX_FILESIZE, maxFileSizeLowerLimit);
+ }
+ if (maxFileSize < conf.getLong("hbase.hregion.max.filesize.limit", maxFileSizeLowerLimit)) {
+ throw new DoNotRetryIOException("MAX_FILESIZE for table descriptor or "
+ + "\"hbase.hregion.max.filesize\" (" + maxFileSize
+ + ") is too small, which might cause over splitting into unmanageable "
+ + "number of regions. Set \"\"hbase.hregion.max.filesize.limit\" to a lower value if "
+ + "you want to bypass this limit");
+ }
+
+ // check flush size
+ long flushSizeLowerLimit = 1024 * 1024L; // 1M is the default lower limit
+ long flushSize = htd.getMemStoreFlushSize();
+ if (flushSize < 0) {
+ flushSize = conf.getLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, flushSizeLowerLimit);
+ }
+ if (flushSize < conf.getLong("hbase.hregion.memstore.flush.size.limit", flushSizeLowerLimit)) {
+ throw new DoNotRetryIOException("MEMSTORE_FLUSHSIZE for table descriptor or "
+ + "\"hbase.hregion.memstore.flush.size\" ("+ flushSize
+ + ") is too small, which might cause very frequent flushing. "
+ + "Set \"\"hbase.hregion.memstore.flush.size.limit\" to a lower value if "
+ + "you want to bypass this limit");
+ }
+
+ // check split policy class can be loaded
+ try {
+ RegionSplitPolicy.getSplitPolicyClass(htd, conf);
+ } catch (Exception ex) {
+ throw new DoNotRetryIOException(ex);
+ }
+
+ // check compression can be loaded
+ checkCompression(htd);
+
+ // check that we have at least 1 CF
+ if (htd.getColumnFamilies().length == 0) {
+ throw new DoNotRetryIOException("Table should have at least one column family");
+ }
+
+ for (HColumnDescriptor hcd : htd.getColumnFamilies()) {
+ // check blockSize
+ if (hcd.getBlocksize() <= 0) {
+ throw new DoNotRetryIOException("Block size for column family " + hcd.getNameAsString()
+ + " must be positive");
+ }
+
+ // check versions
+ if (hcd.getMinVersions() < 0) {
+ throw new DoNotRetryIOException("Min versions for column family " + hcd.getNameAsString()
+ + " must be positive");
+ }
+ // max versions already being checked
+
+ // check replication scope
+ if (hcd.getScope() < 0) {
+ throw new DoNotRetryIOException("Replication scope for column family " + hcd.getNameAsString()
+ + " must be positive");
+ }
+
+ // TODO: should we also check coprocessors and encryption?
+ }
+ }
+
private void checkCompression(final HTableDescriptor htd)
throws IOException {
if (!this.masterCheckCompression) return;
@@ -2036,7 +2111,7 @@
public void modifyTable(final TableName tableName, final HTableDescriptor descriptor)
throws IOException {
checkInitialized();
- checkCompression(descriptor);
+ sanityCheckTableDescriptor(descriptor);
if (cpHost != null) {
cpHost.preModifyTable(tableName, descriptor);
}
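
A sketch of the client-visible effect of this change, assuming the default
lower limits above (16M max file size, 1M flush size); names are illustrative:

    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("t"));
    htd.addFamily(new HColumnDescriptor("cf"));
    htd.setMaxFileSize(1024); // 1K, below the 16M lower limit
    admin.createTable(htd);   // now fails fast with DoNotRetryIOException
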
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionSplitPolicy.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionSplitPolicy.java (revision 1571124)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionSplitPolicy.java (working copy)
@@ -105,7 +105,7 @@
return policy;
}
- static Class<? extends RegionSplitPolicy> getSplitPolicyClass(
+ public static Class<? extends RegionSplitPolicy> getSplitPolicyClass(
HTableDescriptor htd, Configuration conf) throws IOException {
String className = htd.getRegionSplitPolicyClassName();
if (className == null) {
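
The widened visibility lets the master validate the configured policy class
without instantiating it; the call added in HMaster above amounts to:

    // Throws if the class named in the table descriptor cannot be loaded.
    Class<? extends RegionSplitPolicy> clazz =
        RegionSplitPolicy.getSplitPolicyClass(htd, conf);
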
Index: hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java (revision 1571124)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java (working copy)
@@ -5396,6 +5396,61 @@
}
@Test
+ public void testIllegalTableDescriptor() throws Exception {
+ HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("testIllegalTableDescriptor"));
+ HColumnDescriptor hcd = new HColumnDescriptor(FAMILY);
+
+ // create table with 0 families
+ checkTableIsIllegal(htd);
+ htd.addFamily(hcd);
+
+ htd.setMaxFileSize(1024); // 1K
+ checkTableIsIllegal(htd);
+ htd.setMaxFileSize(0);
+ checkTableIsIllegal(htd);
+ htd.setMaxFileSize(1024 * 1024 * 1024); // 1G
+
+ htd.setMemStoreFlushSize(1024);
+ checkTableIsIllegal(htd);
+ htd.setMemStoreFlushSize(0);
+ checkTableIsIllegal(htd);
+ htd.setMemStoreFlushSize(128 * 1024 * 1024); // 128M
+
+ htd.setRegionSplitPolicyClassName("nonexisting.foo.class");
+ checkTableIsIllegal(htd);
+ htd.setRegionSplitPolicyClassName(null);
+
+ hcd.setBlocksize(0);
+ checkTableIsIllegal(htd);
+ hcd.setBlocksize(1);
+
+ hcd.setMinVersions(-1);
+ checkTableIsIllegal(htd);
+ hcd.setMinVersions(3);
+ try {
+ hcd.setMaxVersions(2);
+ fail();
+ } catch (IllegalArgumentException ex) {
+ // expected
+ hcd.setMaxVersions(10);
+ }
+
+ hcd.setScope(-1);
+ checkTableIsIllegal(htd);
+ }
+
+ private void checkTableIsIllegal(HTableDescriptor htd) throws IOException {
+ HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
+ try {
+ admin.createTable(htd);
+ fail();
+ } catch (Exception ex) {
+ // expected: the descriptor must be rejected
+ }
+ assertFalse(admin.tableExists(htd.getTableName()));
+ }
+
+ @Test
public void testRawScanRespectsVersions() throws Exception {
byte[] TABLE = Bytes.toBytes("testRawScan");
HTable table = TEST_UTIL.createTable(TABLE, new byte[][] { FAMILY });
Index: hbase-server/src/test/resources/hbase-site.xml
===================================================================
--- hbase-server/src/test/resources/hbase-site.xml (revision 1571124)
+++ hbase-server/src/test/resources/hbase-site.xml (working copy)
@@ -141,4 +141,16 @@
version is X.X.X-SNAPSHOT"
+
+ hbase.hregion.max.filesize.limit
+ 0
+ region max file size limit check is not enforced for tests
+
+
+
+ hbase.hregion.memstore.flush.size.limit
+ 0
+ memstore flush size limit check is not enforced for tests
+
+
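
The same keys can be tightened in a production hbase-site.xml; the 32M value
below is illustrative only:

    <property>
      <name>hbase.hregion.max.filesize.limit</name>
      <value>33554432</value>
      <description>Reject tables whose MAX_FILESIZE is below 32M</description>
    </property>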