@@ -412,10 +416,10 @@ public class HTableDescriptor implements WritableComparable
+ * A {@link Constraint} must be added to a table before the table is loaded via
+ * {@link Constraints#add(HTableDescriptor, Class...)} or
+ * {@link Constraints#add(HTableDescriptor, org.apache.hadoop.hbase.util.Pair...)}
+ * (if you want to add a configuration with the {@link Constraint}). Constraints
+ * will be run in the order that they are added. Further, a Constraint will be
+ * configured before it is run (on load).
+ * <p>
+ * See {@link Constraints#enableConstraint(HTableDescriptor, Class)} and
+ * {@link Constraints#disableConstraint(HTableDescriptor, Class)} for
+ * enabling/disabling of a given {@link Constraint} after it has been added.
+ * <p>
+ * If a {@link Put} is not valid, the Constraint should throw some sort of
+ * {@link ConstraintException}, indicating that the {@link Put} has failed. When
+ * this exception is thrown, no further retries of the {@link Put} are
+ * attempted, nor are any other {@link Constraint Constraints} attempted (the
+ * {@link Put} is clearly not valid). Therefore, there are performance
+ * implications in the order in which {@link Constraint Constraints} are
+ * specified.
+ * <p>
+ * If a {@link Constraint} fails to fail the {@link Put} via a
+ * {@link ConstraintException}, but instead throws a {@link RuntimeException},
+ * the entire constraint processing mechanism ({@link ConstraintProcessor}) will
+ * be unloaded from the table. This ensures that the region server is still
+ * functional, but no more {@link Put Puts} will be checked via
+ * {@link Constraint Constraints}.
+ * <p>
+ * Further, {@link Constraint Constraints} should probably not be used to
+ * enforce cross-table references, as doing so will cause tremendous write
+ * slowdowns, though it is possible.
+ * <p>
+ * NOTE: Implementing classes must have a nullary (no-args) constructor
+ */
+public interface Constraint extends Configurable {
+
+ /**
+  * Check a {@link Put} to ensure it is valid for the table. If the {@link Put}
+  * is valid, then just return from the method. Otherwise, throw an
+  * {@link Exception} specifying what happened. This {@link Exception} is
+  * propagated back to the client so you can see what caused the {@link Put} to
+  * fail.
+  * @param p {@link Put} to check
+  * @throws ConstraintException when the {@link Put} does not match the
+  *         constraint.
+  */
+ public void check(Put p) throws ConstraintException;
+
+}
diff --git a/src/main/java/org/apache/hadoop/hbase/constraint/ConstraintException.java b/src/main/java/org/apache/hadoop/hbase/constraint/ConstraintException.java
new file mode 100644
index 0000000..4ca8442
--- /dev/null
+++ b/src/main/java/org/apache/hadoop/hbase/constraint/ConstraintException.java
@@ -0,0 +1,48 @@
+/**
+ * Copyright 2011 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.constraint;
+
+import org.apache.hadoop.hbase.DoNotRetryIOException;
+import org.apache.hadoop.hbase.client.Put;
+
+/**
+ * Exception that a user defined constraint throws on failure of a {@link Put}.
+ * <p>
+ * Does NOT attempt the {@link Put} multiple times, since the constraint
+ * should fail every time for the same {@link Put} (it should be idempotent).
+ */
+public class ConstraintException extends DoNotRetryIOException {
+
+/**
+ * This is an ease of use mechanism - all the functionality here could be
+ * implemented on any given system by a coprocessor.
+ */
+public class ConstraintProcessor extends BaseRegionObserver {
+
+ private static final Log LOG = LogFactory.getLog(ConstraintProcessor.class);
+
+ private final ClassLoader classloader;
+
+ private List<? extends Constraint> constraints = new ArrayList<Constraint>();
+
+ /**
+  * Create the constraint processor.
+  * <p>
+  * Stores the current classloader.
+  */
+ public ConstraintProcessor() {
+ classloader = this.getClass().getClassLoader();
+ }
+
+ @Override
+ public void start(CoprocessorEnvironment environment) {
+ if (!(environment instanceof RegionCoprocessorEnvironment))
+ throw new IllegalArgumentException(
+ "Constraints only act on regions - started in an environment that was not a region");
+ RegionCoprocessorEnvironment env = (RegionCoprocessorEnvironment) environment;
+ HTableDescriptor desc = env.getRegion().getTableDesc();
+ try {
+ this.constraints = Constraints.getConstraints(desc, classloader);
+ } catch (IOException e) {
+ throw new IllegalArgumentException(e);
+ }
+
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Added " + constraints.size() + " constraints");
+ }
+ if (LOG.isInfoEnabled()) {
+ LOG.info("Finished loading user Constraints on table: "
+ + new String(desc.getName()));
+ }
+
+ }
+
+ @Override
+ public void prePut(ObserverContext<RegionCoprocessorEnvironment> e, Put put,
+     WALEdit edit, boolean writeToWAL) throws IOException {
+   // check the put against the stored constraints
+   for (Constraint c : constraints) {
+     c.check(put);
+   }
+ }
+ * Constraints can be added to a table at load time, via the {@link HTableDescriptor}.
+ *
+ * NOTE: this class is NOT thread safe. Concurrent setting/enabling/disabling of
+ * constraints can cause constraints to be run at incorrect times or not at all.
+ */
+public final class Constraints {
+ private Constraints() {
+ }
+
+ private static final Log LOG = LogFactory.getLog(Constraints.class);
+ private static final String CONSTRAINT_HTD_KEY_PREFIX = "constraint $";
+ private static final Pattern CONSTRAINT_HTD_ATTR_KEY_PATTERN = Pattern
+ .compile(CONSTRAINT_HTD_KEY_PREFIX, Pattern.LITERAL);
+
+ // Has a Configuration bytes
+ private static final byte HAS_CONFIGURATION = 1;
+ private static final byte DOES_NOT_HAVE_CONFIGURATION = 0;
+
+ // If a constraint is enabled bytes
+ private static final byte ENABLED = 1;
+ private static final byte DISABLED = 0;
+
+ private static final String COUNTER_KEY = "hbase.constraint.counter";
+
+ /**
+ * Enable constraints on a table.
+ *
+ * Currently, if you attempt to add a constraint to the table, then
+ * Constraints will automatically be turned on.
+ * @param desc table description to add the processor
+ * @throws IOException If the {@link ConstraintProcessor} CP couldn't be added
+ * to the table.
+ */
+ public static void enable(HTableDescriptor desc) throws IOException {
+ // if the CP has already been loaded, do nothing
+ String clazz = ConstraintProcessor.class.getName();
+ if (desc.hasCoprocessor(clazz))
+ return;
+
+ // add the constraint processor CP to the table
+ desc.addCoprocessor(clazz);
+ }
+
+ /**
+ * Turn off processing constraints for a given table, even if constraints have
+ * been turned on or added.
+ * @param desc {@link HTableDescriptor} where to disable {@link Constraint
+ * Constraints}.
+ */
+ public static void disable(HTableDescriptor desc) {
+ desc.removeCoprocessor(ConstraintProcessor.class.getName());
+ }
+
+ /**
+ * Remove all {@link Constraint Constraints} that have been added to the table
+ * and turn off the constraint processing.
+ *
+ * All {@link Configuration Configurations} and their associated
+ * {@link Constraint} are removed.
+ * @param desc {@link HTableDescriptor} to remove {@link Constraint
+ * Constraints} from.
+ */
+ public static void remove(HTableDescriptor desc) {
+ disable(desc);
+ List
+ * This will overwrite any configuration associated with the previous
+ * constraint of the same class.
+ * @param desc {@link HTableDescriptor} to add a {@link Constraint}
+ * @param constraints {@link Constraint Constraints} to add. All constraints
+ * are considered automatically enabled on add
+ * @throws IOException If constraint could not be serialized/added to table
+ */
+ public static void add(HTableDescriptor desc,
+     Class<? extends Constraint>... constraints) throws IOException {
+ // make sure constraints are enabled
+ enable(desc);
+ long priority = getNextPriority(desc);
+   for (Class<? extends Constraint> clazz : constraints) {
+ writeConstraint(desc, clazz, null, priority++);
+ }
+ updateLatestPriority(desc, priority);
+ }
+
+ /**
+ * Add constraints and their associated configurations to the table.
+ *
+ * Adding the same constraint class twice will overwrite the first
+ * constraint's configuration
+ * @param desc {@link HTableDescriptor} to add a {@link Constraint}
+ * @param constraints {@link Pair} of a {@link Constraint} and its associated
+ * {@link Configuration}. The Constraint will be configured on load
+ *          with the specified configuration. All constraints are considered
+ * automatically enabled on add
+ * @throws IOException if any constraint could not be serialized. Assumes
+ *           that if one constraint is not added properly, something has gone
+ *           terribly wrong and that all constraints need to be enforced.
+ */
+ public static void add(HTableDescriptor desc,
+     Pair<Class<? extends Constraint>, Configuration>... constraints)
+ * Just delegates through to the constraint for checking, but keeps track of
+ * the priority for sorting.
+ */
+ static class OrderedConstraint implements
+     Comparable<OrderedConstraint> {
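
The body of OrderedConstraint is elided in this hunk. As a purely hypothetical sketch (the class and member names below are assumptions, not the patch's actual code), the delegate-and-sort idea its javadoc describes could look like this:

    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.constraint.Constraint;
    import org.apache.hadoop.hbase.constraint.ConstraintException;

    /** Hypothetical sketch -- not the patch's actual OrderedConstraint. */
    class OrderedConstraintSketch implements Comparable<OrderedConstraintSketch> {

      private final Constraint constraint; // the wrapped, user-supplied constraint
      private final long priority;         // registration order; lower runs first

      OrderedConstraintSketch(Constraint constraint, long priority) {
        this.constraint = constraint;
        this.priority = priority;
      }

      /** Delegate the actual check to the wrapped constraint. */
      void check(Put p) throws ConstraintException {
        constraint.check(p);
      }

      @Override
      public int compareTo(OrderedConstraintSketch other) {
        // sort ascending by priority, so constraints added first run first
        if (this.priority == other.priority) {
          return 0;
        }
        return this.priority < other.priority ? -1 : 1;
      }
    }
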
+
+ Table of Contents
+ <ol>
+  <li>Overview</li>
+  <li>Example usage</li>
+  <li>Caveats</li>
+ </ol>
+
+ Overview
+
+ Constraints are used to enforce business rules in a database.
+ By checking all {@link org.apache.hadoop.hbase.client.Put Puts} on a given table, you can enforce very specific data policies.
+ For instance, you can ensure that a certain column family-column qualifier pair always has a value between 1 and 10.
+ Otherwise, the {@link org.apache.hadoop.hbase.client.Put} is rejected and the data integrity is maintained.
+
+ Constraints are designed to be configurable, so a constraint can be used across different tables, but implement different behavior depending on the specific configuration given to that constraint.
+
+ By adding a constraint to a table (see Example usage), constraints will automatically be enabled.
+ You also then have the option of disabling (just 'turning off') or removing (deleting all associated information) constraints on a table.
+ If you remove all constraints (see {@link org.apache.hadoop.hbase.constraint.Constraints#remove(org.apache.hadoop.hbase.HTableDescriptor)}), you must re-add any {@link org.apache.hadoop.hbase.constraint.Constraint} you want on that table.
+ However, if they are just disabled (see {@link org.apache.hadoop.hbase.constraint.Constraints#disable(org.apache.hadoop.hbase.HTableDescriptor)}), all you need to do is enable constraints again, and everything will be turned back on.
+ Individual constraints can also be enabled, disabled or removed without affecting other constraints.
+
+ By default, constraints are disabled on a table.
+ This means you will not see any slowdown on a table if constraints are not enabled.
+
+ NOTE: Constraints are run in the order that they are added to a table. This has implications for the order in which constraints should be added to a table.
+
+ Under the hood, constraints are implemented as a Coprocessor (see {@link org.apache.hadoop.hbase.constraint.ConstraintProcessor} if you are interested).
+
+ Example usage
+
+ First, you must define a {@link org.apache.hadoop.hbase.constraint.Constraint}.
+ The best way to do this is to extend {@link org.apache.hadoop.hbase.constraint.BaseConstraint}, which takes care of some of the more mundane details of using a {@link org.apache.hadoop.hbase.constraint.Constraint}.
+
+ Let's look at one of the available, built-in constraints, {@link org.apache.hadoop.hbase.constraint.IntegerConstraint} (there are also several simple examples in the tests).
+ The {@link org.apache.hadoop.hbase.constraint.IntegerConstraint} checks to make sure that the value is a String-encoded int.
+ It is really simple to implement this kind of constraint; the only method that needs to be implemented is {@link org.apache.hadoop.hbase.constraint.Constraint#check(org.apache.hadoop.hbase.client.Put)}:
+
+ <pre>
+ public void check(Put p) throws ConstraintException {
+
+   Map<byte[], List<KeyValue>> familyMap = p.getFamilyMap();
+
+   for (List<KeyValue> kvs : familyMap.values()) {
+     for (KeyValue kv : kvs) {
+
+       // just make sure that we can actually pull an int out of the value
+       // (not the whole backing buffer) - this will automatically throw a
+       // NumberFormatException if we try to store something that isn't an
+       // Integer.
+
+       try {
+         Integer.parseInt(new String(kv.getBuffer(), kv.getValueOffset(),
+             kv.getValueLength()));
+       } catch (NumberFormatException e) {
+         throw new ConstraintException("Value in Put (" + p
+             + ") was not a String-encoded integer", e);
+       }
+     }
+   }
+ }
+ </pre>
+
+ Note that all exceptions that you expect to be thrown must be caught and then rethrown as a {@link org.apache.hadoop.hbase.constraint.ConstraintException}.
+ This way, you can be sure that a {@link org.apache.hadoop.hbase.client.Put} fails for an expected reason, rather than for any reason.
+ For example, an {@link java.lang.OutOfMemoryError} is probably indicative of an inherent problem in the {@link org.apache.hadoop.hbase.constraint.Constraint}, rather than a failed {@link org.apache.hadoop.hbase.client.Put}.
+
+ If an unexpected exception is thrown (for example, any kind of uncaught {@link java.lang.RuntimeException}), constraint-checking will be 'unloaded' from the regionserver where that error occurred.
+ This means no further {@link org.apache.hadoop.hbase.constraint.Constraint Constraints} will be checked on that server until it is reloaded. This is done to ensure the system remains as available as possible.
+ Therefore, be careful when writing your own Constraint.
+
+ So now that we have a Constraint, we want to add it to a table. It's as easy as:
+
+ <pre>
+ HTableDescriptor desc = new HTableDescriptor(TABLE_NAME);
+ ...
+ Constraints.add(desc, IntegerConstraint.class);
+ </pre>
+
+ Once we have added the IntegerConstraint, constraints will be enabled on the table (once it is created) and we will always check to make sure that the value is a String-encoded integer.
+
+ However, suppose we also write our own constraint, MyConstraint.java.
+ First, you need to make sure that the class files are on the classpath (in a jar) on the regionserver where that constraint will be run.
+
+ Suppose that MyConstraint also uses a Configuration (see {@link org.apache.hadoop.hbase.constraint.Constraint#getConf()}).
+ Then adding MyConstraint looks like this:
+
+ <pre>
+ HTableDescriptor desc = new HTableDescriptor(TABLE_NAME);
+ Configuration conf = new Configuration();
+ ...
+ (add values to the conf)
+ (modify the table descriptor)
+ ...
+ Constraints.add(desc, new Pair(MyConstraint.class, conf));
+ </pre>
+
+ At this point we have added both the {@link org.apache.hadoop.hbase.constraint.IntegerConstraint} and MyConstraint to the table; the {@link org.apache.hadoop.hbase.constraint.IntegerConstraint} will be run first, followed by MyConstraint.
+
+ Suppose we realize that the {@link org.apache.hadoop.conf.Configuration} for MyConstraint was actually wrong when it was added to the table. Note that when it is added to the table, it is not added by reference, but is instead copied into the {@link org.apache.hadoop.hbase.HTableDescriptor}.
+ Thus, to change the {@link org.apache.hadoop.conf.Configuration} we are using for MyConstraint, we need to do this:
+
+ <pre>
+ (add/modify the conf)
+ ...
+ Constraints.setConfiguration(desc, MyConstraint.class, conf);
+ </pre>
+
+ This will overwrite the previous configuration for MyConstraint, but will not change the order of the constraint, nor whether it is enabled or disabled.
+
+ Note that the same constraint class can be added multiple times to a table without repercussion.
+ A use case for this is the same constraint working differently based on its configuration.
+
+ Suppose then we want to disable just MyConstraint. It's as easy as:
+
+ <pre>
+ Constraints.disable(desc, MyConstraint.class);
+ </pre>
+
+ This just turns off MyConstraint, but retains the position and the configuration associated with MyConstraint.
+ Now, if we want to re-enable the constraint, it's just another one-liner:
+
+ <pre>
+ Constraints.enable(desc, MyConstraint.class);
+ </pre>
+
+ Similarly, constraints on the entire table are disabled via:
+
+ <pre>
+ Constraints.disable(desc);
+ </pre>
+
+ Or enabled via:
+
+ <pre>
+ Constraints.enable(desc);
+ </pre>
+
+ Lastly, suppose you want to remove MyConstraint from the table, including the position at which it should be run and its configuration.
+ This is similarly simple:
+
+ <pre>
+ Constraints.remove(desc, MyConstraint.class);
+ </pre>
+
+ Also, removing all constraints from a table is similarly simple:
+
+ <pre>
+ Constraints.remove(desc);
+ </pre>
+
+ Caveats
+
+ In traditional (SQL) databases, Constraints are often used to enforce referential integrity.
+ However, in HBase, this will likely cause significant overhead and dramatically decrease the number of {@link org.apache.hadoop.hbase.client.Put Puts}/second possible on a table.
+ This is because to check the referential integrity when making a {@link org.apache.hadoop.hbase.client.Put}, one must block on a scan for the 'remote' table, checking for the valid reference.
+ For millions of {@link org.apache.hadoop.hbase.client.Put Puts} a second, this will break down very quickly.
+ There are several options around the blocking behavior including, but not limited to:
+
+ */
+package org.apache.hadoop.hbase.constraint;
\ No newline at end of file
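
To tie the pieces above together, here is a minimal, hypothetical end-to-end sketch. MaxLengthConstraint and the "max.length" key are invented for illustration, and it assumes BaseConstraint supplies the Configurable plumbing (so getConf() returns the Configuration stored with the constraint), as the docs above describe:

    import java.io.IOException;
    import java.util.List;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.constraint.BaseConstraint;
    import org.apache.hadoop.hbase.constraint.ConstraintException;
    import org.apache.hadoop.hbase.constraint.Constraints;
    import org.apache.hadoop.hbase.util.Pair;

    /** Hypothetical constraint: reject values longer than a configured maximum. */
    public class MaxLengthConstraint extends BaseConstraint {

      @Override
      public void check(Put p) throws ConstraintException {
        // read the limit handed to setConf(...) when the region loaded us
        int max = getConf().getInt("max.length", Integer.MAX_VALUE);
        for (List<KeyValue> kvs : p.getFamilyMap().values()) {
          for (KeyValue kv : kvs) {
            if (kv.getValueLength() > max) {
              throw new ConstraintException("Value longer than " + max + " bytes");
            }
          }
        }
      }

      /** Example registration; the descriptor and key name are illustrative. */
      @SuppressWarnings("unchecked")
      public static void register(HTableDescriptor desc) throws IOException {
        Configuration conf = new Configuration(false);
        conf.setInt("max.length", 16);
        // the descriptor stores a copy of conf, not a reference (see the
        // setConfiguration(...) discussion in the package docs above)
        Constraints.add(desc, new Pair(MaxLengthConstraint.class, conf));
      }
    }
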
diff --git a/src/test/java/org/apache/hadoop/hbase/TestHTableDescriptor.java b/src/test/java/org/apache/hadoop/hbase/TestHTableDescriptor.java
new file mode 100644
index 0000000..98106ce
--- /dev/null
+++ b/src/test/java/org/apache/hadoop/hbase/TestHTableDescriptor.java
@@ -0,0 +1,47 @@
+package org.apache.hadoop.hbase;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
+import org.junit.Test;
+
+/**
+ * Test setting values in the descriptor
+ */
+public class TestHTableDescriptor {
+
+ /**
+ * Test coprocessors in the table descriptor
+ * @throws Exception
+ */
+ @Test
+ public void testGetSetRemoveCP() throws Exception {
+ HTableDescriptor desc = new HTableDescriptor("table");
+ // simple CP
+ String className = BaseRegionObserver.class.getName();
+ // add and check that it is present
+ desc.addCoprocessor(className);
+ assertTrue(desc.hasCoprocessor(className));
+ // remove it and check that it is gone
+ desc.removeCoprocessor(className);
+ assertFalse(desc.hasCoprocessor(className));
+ }
+
+ /**
+ * Test that we add and remove strings from settings properly.
+ * @throws Exception
+ */
+ @Test
+ public void testRemoveString() throws Exception {
+ HTableDescriptor desc = new HTableDescriptor("table");
+ String key = "Some";
+ String value = "value";
+ desc.setValue(key, value);
+ assertEquals(value, desc.getValue(key));
+ desc.remove(key);
+ assertEquals(null, desc.getValue(key));
+ }
+
+}
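
The coprocessor round-trip exercised by this test is exactly the mechanism that Constraints.enable/disable build on (see those methods earlier in this patch). A small illustrative sketch, under the assumption that wiring it into a main method like this is purely for demonstration:

    import java.io.IOException;

    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.constraint.ConstraintProcessor;
    import org.apache.hadoop.hbase.constraint.Constraints;

    public class EnableDisableSketch {
      public static void main(String[] args) throws IOException {
        HTableDescriptor desc = new HTableDescriptor("table");
        // enable() just registers the ConstraintProcessor coprocessor
        Constraints.enable(desc);
        System.out.println(desc.hasCoprocessor(ConstraintProcessor.class.getName())); // true
        // disable() unregisters it, leaving any stored constraint settings behind
        Constraints.disable(desc);
        System.out.println(desc.hasCoprocessor(ConstraintProcessor.class.getName())); // false
      }
    }
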
diff --git a/src/test/java/org/apache/hadoop/hbase/constraint/AllFailConstraint.java b/src/test/java/org/apache/hadoop/hbase/constraint/AllFailConstraint.java
new file mode 100644
index 0000000..308e3c3
--- /dev/null
+++ b/src/test/java/org/apache/hadoop/hbase/constraint/AllFailConstraint.java
@@ -0,0 +1,33 @@
+/**
+ * Copyright 2011 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.constraint;
+
+import org.apache.hadoop.hbase.client.Put;
+
+/**
+ * Always fail the put.
+ */
+public class AllFailConstraint extends BaseConstraint {
+
+ @Override
+ public void check(Put p) throws ConstraintException {
+ throw new ConstraintException("AllFailConstraint fails for all puts");
+ }
+}
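
Since ConstraintException extends DoNotRetryIOException, a put rejected by a constraint like AllFailConstraint surfaces on the client without retries, wrapped in a RetriesExhaustedWithDetailsException (the integration test at the end of this patch exercises this path). A hedged client-side sketch, with hypothetical table and family names:

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.client.HTable;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException;
    import org.apache.hadoop.hbase.constraint.ConstraintException;
    import org.apache.hadoop.hbase.util.Bytes;

    public class FailedPutSketch {
      // table/family names mirror the integration test in this patch
      static void doFailingPut(Configuration conf) throws IOException {
        HTable table = new HTable(conf, Bytes.toBytes("test"));
        Put put = new Put(Bytes.toBytes("r1"));
        put.add(Bytes.toBytes("dummy"), new byte[0], Bytes.toBytes("fail"));
        try {
          table.put(put);
        } catch (RetriesExhaustedWithDetailsException e) {
          // ConstraintException extends DoNotRetryIOException, so the client
          // does not retry; the rejection shows up in the per-Put failure list
          for (Throwable cause : e.getCauses()) {
            if (cause instanceof ConstraintException) {
              System.err.println("Put rejected: " + cause.getMessage());
            }
          }
        }
      }
    }
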
diff --git a/src/test/java/org/apache/hadoop/hbase/constraint/AllPassConstraint.java b/src/test/java/org/apache/hadoop/hbase/constraint/AllPassConstraint.java
new file mode 100644
index 0000000..1710282
--- /dev/null
+++ b/src/test/java/org/apache/hadoop/hbase/constraint/AllPassConstraint.java
@@ -0,0 +1,34 @@
+/**
+ * Copyright 2011 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.constraint;
+
+import org.apache.hadoop.hbase.client.Put;
+
+/**
+ * Simple test constraint that always allows the put to pass
+ */
+public class AllPassConstraint extends BaseConstraint {
+
+ @Override
+ public void check(Put p) {
+ // Do nothing - it passes
+ }
+
+}
diff --git a/src/test/java/org/apache/hadoop/hbase/constraint/CheckConfigurationConstraint.java b/src/test/java/org/apache/hadoop/hbase/constraint/CheckConfigurationConstraint.java
new file mode 100644
index 0000000..b6f7b1d
--- /dev/null
+++ b/src/test/java/org/apache/hadoop/hbase/constraint/CheckConfigurationConstraint.java
@@ -0,0 +1,58 @@
+/**
+ * Copyright 2011 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.constraint;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.client.Put;
+
+/**
+ * Test Constraint to check to make sure the configuration is set
+ */
+public class CheckConfigurationConstraint implements Constraint {
+
+
+ private static String key = "testKey";
+ private static String value = "testValue";
+
+ public static Configuration getConfiguration() {
+ Configuration conf = new Configuration();
+ conf.set(key, value);
+ return conf;
+ }
+
+ @Override
+ public Configuration getConf() {
+ return null;
+ }
+
+ @Override
+ public void check(Put p) {
+ // NOOP
+ }
+
+ @Override
+ public void setConf(Configuration conf) {
+ String val = conf.get(key);
+ if (val == null || !val.equals(value))
+ throw new IllegalArgumentException(
+ "Configuration was not passed correctly");
+ }
+
+}
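
CheckConfigurationConstraint only verifies that setConf(Configuration) received the expected key/value; the delivery itself happens through Constraints.add with a Pair, which stores a serialized copy of the Configuration in the table descriptor. A minimal, hypothetical registration sketch:

    import java.io.IOException;

    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.constraint.CheckConfigurationConstraint;
    import org.apache.hadoop.hbase.constraint.Constraints;
    import org.apache.hadoop.hbase.util.Pair;

    public class ConfigDeliverySketch {
      @SuppressWarnings("unchecked")
      public static void main(String[] args) throws IOException {
        HTableDescriptor desc = new HTableDescriptor("test");
        // stores a serialized copy of the Configuration next to the constraint's
        // class name; ConstraintProcessor deserializes it on region load and
        // calls CheckConfigurationConstraint.setConf(...) before any check(...)
        Constraints.add(desc,
            new Pair(CheckConfigurationConstraint.class,
                CheckConfigurationConstraint.getConfiguration()));
      }
    }
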
diff --git a/src/test/java/org/apache/hadoop/hbase/constraint/IntegrationTestConstraint.java b/src/test/java/org/apache/hadoop/hbase/constraint/IntegrationTestConstraint.java
new file mode 100644
index 0000000..f51fc31
--- /dev/null
+++ b/src/test/java/org/apache/hadoop/hbase/constraint/IntegrationTestConstraint.java
@@ -0,0 +1,259 @@
+/**
+ * Copyright 2011 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.constraint;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+/**
+ * Integration tests of the constraint mechanism
+ */
+public class IntegrationTestConstraint {
+ private static final Log LOG = LogFactory
+ .getLog(IntegrationTestConstraint.class);
+
+ private static HBaseTestingUtility util;
+ private static final byte[] tableName = Bytes.toBytes("test");
+ private static final byte[] dummy = Bytes.toBytes("dummy");
+ private static final byte[] row1 = Bytes.toBytes("r1");
+ private static final byte[] test = Bytes.toBytes("test");
+
+ @BeforeClass
+ public static void setUpBeforeClass() throws Exception {
+ util = new HBaseTestingUtility();
+ util.startMiniCluster();
+ }
+
+ /**
+ * Test that we run a passing constraint
+ * @throws Exception
+ */
+ @SuppressWarnings("unchecked")
+ @Test
+ public void testConstraintPasses() throws Exception {
+ // create the table
+ // it would be nice if this was also a method on the util
+ HTableDescriptor desc = new HTableDescriptor(tableName);
+ for (byte[] family : new byte[][] { dummy, test }) {
+ desc.addFamily(new HColumnDescriptor(family));
+ }
+ // add a constraint
+ Constraints.add(desc, CheckWasRunConstraint.class);
+
+ util.getHBaseAdmin().createTable(desc);
+ HTable table = new HTable(util.getConfiguration(), tableName);
+ table.setAutoFlush(true);
+
+ // test that we don't fail on a valid put
+ Put put = new Put(row1);
+ byte[] value = Integer.toString(10).getBytes();
+ put.add(dummy, new byte[0], value);
+ table.put(put);
+
+ assertTrue(CheckWasRunConstraint.wasRun);
+ }
+
+ /**
+ * Test that constraints will fail properly
+ * @throws Exception
+ */
+ @SuppressWarnings("unchecked")
+ @Test(timeout = 10000)
+ public void testConstraintFails() throws Exception {
+
+ // create the table
+ // it would be nice if this was also a method on the util
+ HTableDescriptor desc = new HTableDescriptor(tableName);
+ for (byte[] family : new byte[][] { dummy, test }) {
+ desc.addFamily(new HColumnDescriptor(family));
+ }
+
+ // add a constraint that is sure to fail
+ Constraints.add(desc, AllFailConstraint.class);
+
+ util.getHBaseAdmin().createTable(desc);
+ HTable table = new HTable(util.getConfiguration(), tableName);
+ table.setAutoFlush(true);
+
+ // test that we do fail on violation
+ Put put = new Put(row1);
+ put.add(dummy, new byte[0], "fail".getBytes());
+ LOG.warn("Doing put in table");
+ try {
+ table.put(put);
+      fail("This put should not have succeeded - AllFailConstraint was not run!");
+ } catch (RetriesExhaustedWithDetailsException e) {
+ List