diff --git a/src/main/java/org/apache/hadoop/hbase/constraint/Constraint.java b/src/main/java/org/apache/hadoop/hbase/constraint/Constraint.java
new file mode 100644
index 0000000..c7b723e
--- /dev/null
+++ b/src/main/java/org/apache/hadoop/hbase/constraint/Constraint.java
@@ -0,0 +1,29 @@
+package org.apache.hadoop.hbase.constraint;
+
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.client.Put;
+
+/**
+ * Apply a {@link Constraint} (in traditional database terminology) to an
+ * HTable. Any number of {@link Constraint Constraints} can be added to the
+ * table, in any order. {@link Constraint Constraints} are added by specifying
+ * the classes on the table level as a Coprocessor, for instance via
+ * {@link HTableDescriptor#addCoprocessor(String)}. The order in which
+ * {@link Constraint Constraints} are specified is the order in which they
+ * will be applied.
+ * <p>
+ * If any {@link Constraint} fails, then no further {@link Constraint
+ * Constraints} will be checked. Therefore, there are performance implications
+ * in the order in which {@link Constraint Constraints} are specified.
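+ * <p>
+ * As an illustrative sketch (not part of this patch; it assumes an existing
+ * {@code HBaseAdmin} instance named {@code admin} and made-up table/family
+ * names), a {@link Constraint} could be attached to a new table like this:
+ *
+ * <pre>
+ * HTableDescriptor desc = new HTableDescriptor(Bytes.toBytes("myTable"));
+ * desc.addFamily(new HColumnDescriptor(Bytes.toBytes("info")));
+ * // register the constraint as a table-level coprocessor
+ * desc.addCoprocessor(IntegerConstraint.class.getName());
+ * admin.createTable(desc);
+ * </pre>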
+ */
+public interface Constraint {
+  /**
+   * Check a {@link Put} to ensure it is valid for the table. If the
+   * {@link Put} is valid, then just return from the method. Otherwise, throw
+   * an {@link Exception} specifying what happened. This {@link Exception} is
+   * propagated back to the client so you can see what caused the {@link Put}
+   * to fail.
+   * @param p {@link Put} to check
+   * @throws Throwable if the {@link Put} does not satisfy the constraint
+   */
+  public void check(Put p) throws Throwable;
+}
diff --git a/src/main/java/org/apache/hadoop/hbase/constraint/ConstraintBase.java b/src/main/java/org/apache/hadoop/hbase/constraint/ConstraintBase.java
new file mode 100644
index 0000000..e09b804
--- /dev/null
+++ b/src/main/java/org/apache/hadoop/hbase/constraint/ConstraintBase.java
@@ -0,0 +1,34 @@
+package org.apache.hadoop.hbase.constraint;
+
+import java.io.IOException;
+
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
+import org.apache.hadoop.hbase.coprocessor.ObserverContext;
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
+import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
+
+/**
+ * Base class to use when actually implementing a {@link Constraint}. It hooks
+ * {@link Constraint#check(Put)} into the coprocessor lifecycle and takes care
+ * of making sure that any exception thrown by the check is wrapped and passed
+ * back to the client, so a rejected {@link Put} is never stored.
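+ * <p>
+ * A subclass only has to implement {@link Constraint#check(Put)}. As a
+ * hypothetical sketch (this class is not part of the patch), a constraint
+ * rejecting empty puts could look like:
+ *
+ * <pre>
+ * public class NonEmptyConstraint extends ConstraintBase {
+ *   &#64;Override
+ *   public void check(Put p) throws Throwable {
+ *     if (p.getFamilyMap().isEmpty()) {
+ *       throw new IllegalArgumentException("Put must contain at least one KeyValue");
+ *     }
+ *   }
+ * }
+ * </pre>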
+ */
+public abstract class ConstraintBase extends BaseRegionObserver implements
+    Constraint {
+
+  @Override
+  public void prePut(ObserverContext<RegionCoprocessorEnvironment> env,
+      Put put, WALEdit edit, boolean writeToWAL) throws IOException {
+    try {
+      this.check(put);
+    } catch (Throwable e) {
+      // if it threw an exception, then pass that back to the client and fail
+      // the put
+      throw new IOException(e);
+    }
+    // if it was valid, then let it go through
+  }
+
+}
diff --git a/src/main/java/org/apache/hadoop/hbase/constraint/IntegerConstraint.java b/src/main/java/org/apache/hadoop/hbase/constraint/IntegerConstraint.java
new file mode 100644
index 0000000..462e5b4
--- /dev/null
+++ b/src/main/java/org/apache/hadoop/hbase/constraint/IntegerConstraint.java
@@ -0,0 +1,27 @@
+package org.apache.hadoop.hbase.constraint;
+
+import java.util.List;
+import java.util.Map;
+
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.util.Bytes;
+
+/**
+ * Check to make sure that every value in the {@link Put} parses as an
+ * integer, otherwise reject it. For example, a value written as
+ * {@code Bytes.toBytes("42")} passes, while {@code Bytes.toBytes("forty-two")}
+ * is rejected with a {@link NumberFormatException}.
+ */
+public class IntegerConstraint extends ConstraintBase {
+
+  @Override
+  public void check(Put p) throws Throwable {
+    Map<byte[], List<KeyValue>> familyMap = p.getFamilyMap();
+    for (List<KeyValue> kvs : familyMap.values()) {
+      for (KeyValue kv : kvs) {
+        // just make sure that we can actually pull an int out of the value
+        // bytes (not the whole backing buffer); this automatically throws a
+        // NumberFormatException if we try to store something that isn't an
+        // integer.
+        Integer.parseInt(Bytes.toString(kv.getValue()));
+      }
+    }
+  }
+}
diff --git a/src/test/java/org/apache/hadoop/hbase/constraint/IntegrationTestConstraint.java b/src/test/java/org/apache/hadoop/hbase/constraint/IntegrationTestConstraint.java
new file mode 100644
index 0000000..dd2d3bd
--- /dev/null
+++ b/src/test/java/org/apache/hadoop/hbase/constraint/IntegrationTestConstraint.java
@@ -0,0 +1,122 @@
+package org.apache.hadoop.hbase.constraint;
+
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
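+/**
+ * Integration test that spins up a mini cluster and verifies that a
+ * {@link Constraint} registered as a table-level coprocessor can both accept
+ * and reject {@link Put}s.
+ */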
+public class IntegrationTestConstraint {
+  private static HBaseTestingUtility util;
+  private static final byte[] tableName = Bytes.toBytes("test");
+  private static final byte[] dummy = Bytes.toBytes("dummy");
+  private static final byte[] row1 = Bytes.toBytes("r1");
+  private static final byte[] row2 = Bytes.toBytes("r2");
+  private static final byte[] row3 = Bytes.toBytes("r3");
+  private static final byte[] test = Bytes.toBytes("test");
+
+  @BeforeClass
+  public static void setUpBeforeClass() throws Exception {
+    util = new HBaseTestingUtility();
+
+    // Configuration conf = util.getConfiguration();
+    // conf.setStrings(CoprocessorHost.USER_REGION_COPROCESSOR_CONF_KEY,
+    //     AllPassConstraint.class.getName());
+    util.startMiniCluster();
+  }
+
+  @Test
+  public void testConstraintPasses() throws Exception {
+    // create the table
+    // it would be nice if this was also a method on the util
+    HTableDescriptor desc = new HTableDescriptor(tableName);
+    desc.addCoprocessor(AllPassConstraint.class.getName());
+    for (byte[] family : new byte[][] { dummy, test }) {
+      desc.addFamily(new HColumnDescriptor(family));
+    }
+
+    util.getHBaseAdmin().createTable(desc);
+    HTable table = new HTable(util.getConfiguration(), tableName);
+    table.setAutoFlush(true);
+
+    // test that we don't fail on a valid put
+    Put put = new Put(row1);
+    byte[] value = Integer.toString(10).getBytes();
+    put.add(dummy, new byte[0], value);
+    table.put(put);
+
+    // cleanup
+    util.getHBaseAdmin().disableTable(tableName);
+    util.getHBaseAdmin().deleteTable(tableName);
+  }
+
+  @Test
+  public void testConstraintsFail() throws Exception {
+    // create the table
+    // it would be nice if this was also a method on the util
+    HTableDescriptor desc = new HTableDescriptor(tableName);
+    desc.addCoprocessor(AllFailConstraint.class.getName());
+    for (byte[] family : new byte[][] { dummy, test }) {
+      desc.addFamily(new HColumnDescriptor(family));
+    }
+
+    util.getHBaseAdmin().createTable(desc);
+    HTable table = new HTable(util.getConfiguration(), tableName);
+    table.setAutoFlush(true);
+
+    // test that we do fail on violation
+    Put put = new Put(row1);
+    put.add(dummy, new byte[0], "fail".getBytes());
+    boolean rejected = false;
+    try {
+      table.put(put);
+    } catch (IOException e) {
+      // expected - the constraint turned down the put
+      rejected = true;
+    }
+    assertTrue("Put should have been rejected by AllFailConstraint", rejected);
+
+    // cleanup
+    util.getHBaseAdmin().disableTable(tableName);
+    util.getHBaseAdmin().deleteTable(tableName);
+  }
+
+  @AfterClass
+  public static void tearDownAfterClass() throws Exception {
+    util.shutdownMiniCluster();
+  }
+
+  /**
+   * Constraint that allows all puts to pass.
+   */
+  public static class AllPassConstraint extends ConstraintBase {
+
+    @Override
+    public void check(Put p) throws Throwable {
+      // Do nothing - whatever the put is, it passes
+    }
+
+  }
+
+  /**
+   * Fail on every put.
+   */
+  public static class AllFailConstraint extends ConstraintBase {
+
+    @Override
+    public void check(Put p) throws Throwable {
+      throw new RuntimeException("AllFailConstraint fails for all puts");
+    }
+  }
+
+}