commit c6c682fd0b60f923ede922820c2634061a942c10 Author: karthick Date: Tue May 3 19:37:02 2011 -0400 HBASE-3851 A Random-Access Column Object Model Change-Id: I88909c151e1b38c03012cf90412b03980ac2b14a diff --git a/src/main/java/org/apache/hadoop/hbase/client/ColumnList.java b/src/main/java/org/apache/hadoop/hbase/client/ColumnList.java new file mode 100644 index 0000000..79ace47 --- /dev/null +++ b/src/main/java/org/apache/hadoop/hbase/client/ColumnList.java @@ -0,0 +1,154 @@ +/** + * Copyright 2009 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.client; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.LinkedHashSet; +import java.util.List; +import java.util.ListIterator; +import java.util.Set; + +import org.apache.hadoop.hbase.util.Bytes; + +/** + * The ColumnList implements the {@link List} and + * {@link ColumnObject} interfaces such that each element in the list is stored + * against a property whose name is the ordinal position of that element. 
+ */ +public class ColumnList extends ArrayList implements ColumnObject, + List { + + private static final long serialVersionUID = 899891722343011142L; + + private byte[] id; + + public ColumnList(byte[] id) { + this.id = id; + } + + @Override + public byte[] getId() { + return id; + } + + @Override + public Set getPropertyNames() { + Set propertyNames = new LinkedHashSet(); + for (int ordinal = 0; ordinal < size(); ordinal++) { + propertyNames.add(Bytes.toBytes(ordinal)); + } + return Collections.unmodifiableSet(propertyNames); + } + + @Override + public byte[] getPropertyValue(byte[] propertyName) { + if (propertyName == null) { + return null; + } + int ordinal = Bytes.toInt(propertyName); + return ordinal < size() ? get(ordinal) : null; + } + + @Override + public byte[] set(int index, byte[] element) { + while (index >= size()) { + super.add(null); + } + return super.set(index, element); + } + + public static ColumnList valueOf(ColumnObject columnObject) { + ColumnList columnList = new ColumnList(columnObject.getId()); + Set propertyNames = columnObject.getPropertyNames(); + for (byte[] propertyName : propertyNames) { + columnList.set(Bytes.toInt(propertyName), + columnObject.getPropertyValue(propertyName)); + } + return columnList; + } + + public static ColumnList valueOf(byte[] id, List list) { + ColumnList columnList = new ColumnList(id); + for (byte[] value : list) { + columnList.add(value); + } + return columnList; + } + + @Override + public int indexOf(Object o) { + if (o != null && o instanceof byte[]) { + for (int i = 0; i < size(); i++) + if (Bytes.equals((byte[]) o, get(i))) + return i; + } + return super.indexOf(o); + } + + @Override + public int hashCode() { + final int prime = 31; + int result = super.hashCode(); + result = prime * result + Arrays.hashCode(id); + return result; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) + return true; + if (getClass() != obj.getClass()) + return false; + ColumnList that = (ColumnList) 
obj; + if (!Arrays.equals(this.id, that.id)) + return false; + if (this.size() != that.size()) { + return false; + } + ListIterator e1 = listIterator(); + ListIterator e2 = ((List) that).listIterator(); + while (e1.hasNext() && e2.hasNext()) { + byte[] o1 = e1.next(); + byte[] o2 = e2.next(); + if (!(o1 == null ? o2 == null : Bytes.equals(o1, o2))) + return false; + } + return !(e1.hasNext() || e2.hasNext()); + } + + @Override + public String toString() { + StringBuffer buffer = new StringBuffer(); + buffer.append("["); + boolean firstEntry = true; + for (byte[] entry : this) { + if (!firstEntry) { + buffer.append(", "); + firstEntry = false; + } + buffer.append(Bytes.toString(entry)); + } + buffer.append("]"); + return buffer.toString(); + } + +} \ No newline at end of file diff --git a/src/main/java/org/apache/hadoop/hbase/client/ColumnMap.java b/src/main/java/org/apache/hadoop/hbase/client/ColumnMap.java new file mode 100644 index 0000000..ff8ef71 --- /dev/null +++ b/src/main/java/org/apache/hadoop/hbase/client/ColumnMap.java @@ -0,0 +1,139 @@ +/** + * Copyright 2009 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.client; + +import java.util.Arrays; +import java.util.Collections; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; +import java.util.TreeMap; + +import org.apache.hadoop.hbase.util.Bytes; + +/** + * The ColumnMap implements the {@link Map} and + * {@link ColumnObject} interfaces such that each {@link Map.Entry} is treated + * as an object property. + */ +public class ColumnMap extends TreeMap implements ColumnObject, + Map { + + private static final long serialVersionUID = 899891722343011142L; + + private byte[] id; + + public ColumnMap(byte[] id) { + super(Bytes.BYTES_COMPARATOR); + this.id = id; + } + + @Override + public byte[] getId() { + return id; + } + + @Override + public Set getPropertyNames() { + Set propertyNames = new HashSet(); + propertyNames.addAll(keySet()); + return Collections.unmodifiableSet(propertyNames); + } + + @Override + public byte[] getPropertyValue(byte[] propertyName) { + return get(propertyName); + } + + public static ColumnMap valueOf(ColumnObject columnObject) { + ColumnMap columnMap = new ColumnMap(columnObject.getId()); + for (byte[] propertyName : columnObject.getPropertyNames()) { + byte[] propertyValue = columnObject.getPropertyValue(propertyName); + columnMap.put(propertyName, propertyValue); + } + return columnMap; + } + + public static ColumnMap valueOf(byte[] id, Map map) { + ColumnMap columnMap = new ColumnMap(id); + columnMap.putAll(map); + return columnMap; + } + + @Override + public int hashCode() { + final int prime = 31; + int result = super.hashCode(); + result = prime * result + Arrays.hashCode(id); + return result; + } + + @Override + public boolean equals(Object object) { + if (object == null) { + return false; + } + if (!(object instanceof ColumnMap)) { + return false; + } + ColumnMap that = (ColumnMap) object; + if (this.size() != that.size()) { + return false; + } + if (!Bytes.equals(id, that.id)) + return false; + + try { + for (Map.Entry entry : 
this.entrySet()) { + byte[] key = entry.getKey(); + byte[] value = entry.getValue(); + if (value == null) { + if (!(that.get(key) == null && that.containsKey(key))) { + return false; + } else { + if (!Bytes.equals(value, that.get(key))) { + return false; + } + } + } + } + } catch (Exception e) { + return false; + } + return true; + } + + @Override + public String toString() { + StringBuffer buffer = new StringBuffer(); + buffer.append("{"); + boolean firstEntry = true; + for (Map.Entry entry : entrySet()) { + if (!firstEntry) { + buffer.append(", "); + firstEntry = false; + } + buffer.append(Bytes.toString(entry.getKey())).append(" : ") + .append(entry.getValue()); + } + buffer.append("}"); + return buffer.toString(); + } +} \ No newline at end of file diff --git a/src/main/java/org/apache/hadoop/hbase/client/ColumnObject.java b/src/main/java/org/apache/hadoop/hbase/client/ColumnObject.java new file mode 100644 index 0000000..0607afc --- /dev/null +++ b/src/main/java/org/apache/hadoop/hbase/client/ColumnObject.java @@ -0,0 +1,48 @@ +/** + * Copyright 2009 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.client; + +import java.util.RandomAccess; +import java.util.Set; + +import org.apache.hadoop.hbase.KeyValue; + +/** + * The ColumnObject class denotes an arbitrary type that is made up + * of a set of properties, where each property is a tuple. + * + *

+ * In essence, each property of an object maps to a distinct {@link KeyValue}. + * In particular, the property's name maps to a column, prefixed by the + * qualifier and the object's identifier, which is assumed to be unique in the + * scope of a given column family. The property's value is stored as the + * {@link KeyValue#getValue()} of the corresponding column. + */ +public interface ColumnObject extends RandomAccess { + // A value that denotes that the corresponding property is to be deleted. + public static final byte[] VALUE_IS_DELETED = new byte[] {}; + + public byte[] getId(); + + public Set getPropertyNames(); + + public byte[] getPropertyValue(byte[] propertyName); + +} diff --git a/src/main/java/org/apache/hadoop/hbase/client/ColumnObjectDelete.java b/src/main/java/org/apache/hadoop/hbase/client/ColumnObjectDelete.java new file mode 100644 index 0000000..2590f06 --- /dev/null +++ b/src/main/java/org/apache/hadoop/hbase/client/ColumnObjectDelete.java @@ -0,0 +1,232 @@ +/* + * Copyright 2009 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.client; + +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.io.Writable; + +/** + * Used to perform Delete operations on a single row, and the specified + * {@link ColumnObject}s. + *

+ * To delete an entire row, instantiate a ColumnObjectDelete object with the row + * to delete. To further define the scope of what to delete, perform additional + * methods as outlined below. + *

+ * To delete specific column object properties, execute + * {@link #deleteColumn(byte[]) family, byte[] qualifier, byte[] objectId, + * byte[] property)} for each object property to delete. + *

+ * To delete an entire column object, execute + * {@link #deleteColumnObject(byte[]) family, byte[] qualifier, byte[] + * objectId)} for each family to delete. + *

+ *

+ * To delete column object properties based on whether their values are equal to + * {@link ColumnObject#VALUE_IS_DELETED}, execute + * {@link #deleteColumnObject(byte[]) family, byte[] qualifier, byte[] objectId, + * boolean valueBased)} for each family to delete. + *

+ */ +public class ColumnObjectDelete extends Delete + implements Writable, Row, Comparable { + /** Constructor for Writable. DO NOT USE */ + public ColumnObjectDelete() { + super(); + } + + /** + * Create a ColumnObjectDelete operation for the specified row. + * + * @param row + * row key + */ + public ColumnObjectDelete(byte[] row) { + super(row); + } + + /** + * Create a ColumnObjectDelete operation for the specified row, using an + * existing row lock. + * + * @param row + * row key + * @param timestamp + * maximum version timestamp (only for delete row) + * @param rowLock + * previously acquired row lock, or null + */ + public ColumnObjectDelete(byte[] row, long timestamp, RowLock rowLock) { + super(row, timestamp, rowLock); + } + + /** + * Copy constructor. Creates a ColumnObjectDelete operation cloned from the + * specified Delete. + * + * @param deleteToCopy + * delete to copy + */ + public ColumnObjectDelete(ColumnObjectDelete deleteToCopy) { + super(deleteToCopy); + } + + /** + * Delete all versions of the column specific to the given object property. + * + * @param family + * family name + * @param qualifier + * column qualifier + * @param objectId + * object identifier + * @param property + * object property + * @return this for invocation chaining + */ + public ColumnObjectDelete deleteProperty(byte[] family, + byte[] qualifier, byte[] objectId, byte[] property) { + byte[] qualifiedKey = ColumnQualifiers.concatSegments(qualifier, objectId, + property); + super.deleteColumn(family, qualifiedKey); + return this; + } + + /** + * Delete all versions of the column specific to the given object property, + * with a timestamp less than or equal to the specified timestamp. 
+ * + * @param family + * family name + * @param qualifier + * column qualifier + * @param timestamp + * maximum version timestamp + * @param objectId + * object identifier + * @param property + * object property + * @return this for invocation chaining + */ + public ColumnObjectDelete deleteProperty(byte[] family, + byte[] qualifier, long ts, byte[] objectId, byte[] property) { + byte[] qualifiedKey = ColumnQualifiers.concatSegments(qualifier, objectId, + property); + super.deleteColumn(family, qualifiedKey, ts); + return this; + } + + /** + * Delete all versions of the column specific to the given object. + * + * @param family + * family name + * @param qualifier + * column qualifier + * @param objectId + * object identifier + * @return this for invocation chaining + */ + public ColumnObjectDelete deleteObject(byte[] family, + byte[] qualifier, ColumnObject object) { + return deleteObject(family, qualifier, object, false); + } + + /** + * Delete all versions of the column specific to the given object based on the + * values of its properties. + * + * @param family + * family name + * @param qualifier + * column qualifier + * @param objectId + * object identifier + * @param valueBased + * if true, delete property if and only if marked for delete + * @return this for invocation chaining + */ + public ColumnObjectDelete deleteObject(byte[] family, + byte[] qualifier, ColumnObject object, boolean valueBased) { + byte[] objectId = object.getId(); + for (byte[] propertyName : object.getPropertyNames()) { + if (valueBased) { + byte[] propertyValue = object.getPropertyValue(propertyName); + if (!Bytes.equals(ColumnObject.VALUE_IS_DELETED, propertyValue)) { + continue; + } + } + deleteProperty(family, qualifier, objectId, propertyName); + } + return this; + } + + /** + * Delete all versions of the column specific to the given object, with a + * timestamp less than or equal to the specified timestamp. 
+ * + * @param family + * family name + * @param qualifier + * column qualifier + * @param timestamp + * maximum version timestamp + * @param objectId + * object identifier + * @return this for invocation chaining + */ + public ColumnObjectDelete deleteObject(byte[] family, + byte[] qualifier, long ts, ColumnObject object) { + return deleteObject(family, qualifier, ts, object, false); + } + + /** + * Delete all versions of the column specific to the given object based on the + * values of its properties, with a timestamp less than or equal to the + * specified timestamp. + * + * @param family + * family name + * @param qualifier + * column qualifier + * @param timestamp + * maximum version timestamp + * @param objectId + * object identifier + * @param valueBased + * if true, delete property if and only if marked for delete + * @return this for invocation chaining + */ + public ColumnObjectDelete deleteObject(byte[] family, + byte[] qualifier, long ts, ColumnObject object, boolean valueBased) { + byte[] objectId = object.getId(); + for (byte[] propertyName : object.getPropertyNames()) { + if (valueBased) { + byte[] propertyValue = object.getPropertyValue(propertyName); + if (!Bytes.equals(ColumnObject.VALUE_IS_DELETED, propertyValue)) { + continue; + } + } + deleteProperty(family, qualifier, ts, objectId, propertyName); + } + return this; + } + +} diff --git a/src/main/java/org/apache/hadoop/hbase/client/ColumnObjectGet.java b/src/main/java/org/apache/hadoop/hbase/client/ColumnObjectGet.java new file mode 100644 index 0000000..15f2034 --- /dev/null +++ b/src/main/java/org/apache/hadoop/hbase/client/ColumnObjectGet.java @@ -0,0 +1,155 @@ +/** + * Copyright 2009 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.client; + +import java.util.Map; +import java.util.Map.Entry; +import java.util.NavigableSet; +import java.util.TreeMap; +import java.util.TreeSet; + +import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp; +import org.apache.hadoop.hbase.filter.Filter; +import org.apache.hadoop.hbase.filter.QualifierFilter; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.io.Writable; + +/** + * Used to perform Get operations on a single row. + *

+ * To get everything for a row, instantiate a Get object with the row to get. To + * further define the scope of what to get, perform additional methods as + * outlined below. + *

+ * To get all columns from specific objects, execute + * {@link #addObject(byte[], byte[], byte[])} for each object to retrieve. + *

+ * To get specific properties, execute + * {@link #addProperty(byte[], byte[], byte[], byte[])} for each object property + * to retrieve. + *

+ * To only all objects columns, execute {@link #addObjects(byte[], byte[])}. + *

+ * To limit the number of versions of each column to be returned, execute + * {@link #setMaxVersions(int) setMaxVersions}. + *

+ * To add a filter, execute {@link #setFilter(Filter) setFilter}. + */ +public class ColumnObjectGet extends Get implements + Writable { + /** Constructor for Writable. DO NOT USE */ + public ColumnObjectGet() { + } + + /** + * Create a Get operation for the specified row. + *

+ * If no further operations are done, this will get the latest version of all + * columns in all families of the specified row. + * + * @param row + * row key + */ + public ColumnObjectGet(byte[] row) { + super(row, null); + } + + /** + * Create a Get operation for the specified row, using an existing row lock. + *

+ * If no further operations are done, this will get the latest version of all + * columns in all families of the specified row. + * + * @param row + * row key + * @param rowLock + * previously acquired row lock, or null + */ + public ColumnObjectGet(byte[] row, RowLock rowLock) { + super(row, rowLock); + } + + public ColumnObjectGet addObject(byte[] family, byte[] qualifier, + byte[] objectId) { + return (ColumnObjectGet) addObjects(family, + ColumnQualifiers.concatSegments(qualifier, objectId)); + } + + public ColumnObjectGet addObjects(byte[] family, byte[] qualifier) { + return addObjects(family, qualifier, true); + } + + @SuppressWarnings("deprecation") + public ColumnObjectGet addObjects(byte[] family, byte[] qualifier, + boolean prefix) { + if (prefix) { + addColumn(family); + setFilter(new QualifierFilter(CompareOp.EQUAL, + ColumnQualifiers.getComparator(qualifier))); + + } else { + super.addColumn(family, qualifier); + } + return this; + } + + @SuppressWarnings("unchecked") + public ColumnObjectGet addProperty(byte[] family, byte[] qualifier, + byte[] objectId, byte[] propertyName) { + return (ColumnObjectGet) addColumn(family, + ColumnQualifiers.concatSegments(qualifier, objectId, propertyName)); + } + + public Map>> getFamilyObjectMap() { + Map> familyMap = getFamilyMap(); + Map>> columnObjects = new TreeMap>>( + Bytes.BYTES_COMPARATOR); + for (Entry> entry : familyMap.entrySet()) { + byte[] family = entry.getKey(); + NavigableSet columns = entry.getValue(); + + Map> objects = new TreeMap>( + Bytes.BYTES_COMPARATOR); + byte[] previousName = null; + for (byte[] qualifier : columns) { + byte[][] qualifierParts = ColumnQualifiers.extractSegments(qualifier); + byte[] objectName = qualifierParts[1]; + if (previousName == null + || Bytes.compareTo(objectName, previousName) != 0) { + previousName = objectName; + } + NavigableSet mapKeys = objects.get(objectName); + if (mapKeys == null) { + mapKeys = new TreeSet(Bytes.BYTES_COMPARATOR); + objects.put(objectName, 
mapKeys); + } + byte[] objectKey = qualifierParts[1]; + mapKeys.add(objectKey); + } + columnObjects.put(family, objects); + } + + return columnObjects; + } + + public Map> getFamilyObjects(String objectKey) { + return getFamilyObjectMap().get(objectKey); + } +} \ No newline at end of file diff --git a/src/main/java/org/apache/hadoop/hbase/client/ColumnObjectPut.java b/src/main/java/org/apache/hadoop/hbase/client/ColumnObjectPut.java new file mode 100644 index 0000000..f4af511 --- /dev/null +++ b/src/main/java/org/apache/hadoop/hbase/client/ColumnObjectPut.java @@ -0,0 +1,110 @@ +/* + * Copyright 2009 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.client; + +import org.apache.hadoop.hbase.io.HeapSize; +import org.apache.hadoop.io.Writable; + +/** + * Used to perform Put operations for a single row and specified column + * object(s). + *

+ * To perform a {@link Put} of a {@link ColumnObject}, instantiate a + * ColumnObjectPut object with the row to insert to and for each column object + * to be inserted, execute {@link #add(byte[] family, byte[] qualifier, + * ColumnObject object} or {@link #add(byte[] family, byte[] qualifier, long ts, + * ColumnObject object} if setting the timestamp. + */ +public class ColumnObjectPut extends Put implements + HeapSize, Writable, Row, Comparable { + /** Constructor for Writable. DO NOT USE */ + public ColumnObjectPut() { + super(); + } + + /** + * Create a Put operation for the specified row. + * + * @param row + * row key + */ + public ColumnObjectPut(byte[] row) { + super(row); + } + + /** + * Create a Put operation for the specified row, using an existing row lock. + * + * @param row + * row key + * @param rowLock + * previously acquired row lock, or null + */ + public ColumnObjectPut(byte[] row, RowLock rowLock) { + super(row, rowLock); + } + + /** + * Copy constructor. Creates a Put operation cloned from the specified Put. 
+ * + * @param putToCopy + * put to copy + */ + public ColumnObjectPut(ColumnObjectPut putToCopy) { + super(putToCopy); + } + + public ColumnObjectPut add(byte[] family, byte[] qualifier, + byte[] objectId, byte[] property, byte[] objectValue) { + byte[] qualifiedKey = ColumnQualifiers.concatSegments(qualifier, objectId, + property); + super.add(family, qualifiedKey, objectValue); + return this; + } + + public ColumnObjectPut add(byte[] family, byte[] qualifier, long ts, + byte[] objectId, byte[] property, byte[] objectValue) { + byte[] qualifiedKey = ColumnQualifiers.concatSegments(qualifier, objectId, + property); + super.add(family, qualifiedKey, ts, objectValue); + return this; + } + + public ColumnObjectPut add(byte[] family, byte[] qualifier, + ColumnObject object) { + byte[] objectId = object.getId(); + for (byte[] propertyName : object.getPropertyNames()) { + byte[] propertyValue = object.getPropertyValue(propertyName); + add(family, qualifier, objectId, propertyName, propertyValue); + } + return this; + } + + public ColumnObjectPut add(byte[] family, byte[] qualifier, long ts, + ColumnObject object) { + byte[] objectId = object.getId(); + for (byte[] propertyName : object.getPropertyNames()) { + byte[] propertyValue = object.getPropertyValue(propertyName); + add(family, qualifier, ts, objectId, propertyName, propertyValue); + } + return this; + } + +} diff --git a/src/main/java/org/apache/hadoop/hbase/client/ColumnObjectResult.java b/src/main/java/org/apache/hadoop/hbase/client/ColumnObjectResult.java new file mode 100644 index 0000000..1c7c490 --- /dev/null +++ b/src/main/java/org/apache/hadoop/hbase/client/ColumnObjectResult.java @@ -0,0 +1,102 @@ +/** + * Copyright 2009 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.client; + +import java.util.Collections; +import java.util.HashSet; +import java.util.Map; +import java.util.NavigableMap; +import java.util.Set; +import java.util.TreeMap; + +import org.apache.hadoop.hbase.util.Bytes; + +/** + * Single row result of a {@link ColumnObjectGet} query. + *

+ * + *

+ * To get all the properties for a specific object, use + * {@link #getProperties(byte[], byte[])}. + * + *

+ * To get a specific property, use + * {@link #getPropertyValue(byte[], byte[], byte[], byte[])}. + **/ +public class ColumnObjectResult extends Result { + public static final int QUALIFIER_INDEX_OBJECT_ID = 1; + public static final int QUALIFIER_INDEX_PROPERTY_NAME = 2; + + public ColumnObjectResult(Result result) { + super(result.raw()); + } + + public byte[] getPropertyValue(byte[] family, byte[] qualifier, + byte[] objectId, byte[] propertyName) { + byte[] qualifiedKey = ColumnQualifiers.concatSegments(qualifier, objectId, + propertyName); + return super.getValue(family, qualifiedKey); + } + + public Map getProperties(byte[] family, byte[] objectId) { + NavigableMap familyMap = getFamilyMap(family); + NavigableMap propertyMap = new TreeMap( + Bytes.BYTES_COMPARATOR); + if (familyMap == null) { + return propertyMap; + } + for (Map.Entry entry : familyMap.entrySet()) { + byte[] qualifier = entry.getKey(); + byte[] entrysObjectId = ColumnQualifiers.getSegment(qualifier, + QUALIFIER_INDEX_OBJECT_ID); + if (!Bytes.equals(objectId, entrysObjectId)) { + continue; + } + byte[] propertyName = ColumnQualifiers.getSegment(qualifier, + QUALIFIER_INDEX_PROPERTY_NAME); + byte[] propertyValue = entry.getValue(); + propertyMap.put(propertyName, propertyValue); + } + return propertyMap; + } + + public ColumnObject getColumnObject(byte[] family, final byte[] objectId) { + final Map properties = getProperties(family, objectId); + return new ColumnObject() { + @Override + public byte[] getId() { + return objectId; + } + + @Override + public Set getPropertyNames() { + Set propertyNames = new HashSet(); + propertyNames.addAll(properties.keySet()); + return Collections.unmodifiableSet(propertyNames); + } + + @Override + public byte[] getPropertyValue(byte[] propertyName) { + return properties.get(propertyName); + } + }; + } + +} diff --git a/src/main/java/org/apache/hadoop/hbase/client/ColumnQualifiers.java b/src/main/java/org/apache/hadoop/hbase/client/ColumnQualifiers.java new 
// ===== src/main/java/org/apache/hadoop/hbase/client/ColumnQualifiers.java =====
/**
 * Copyright 2009 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.client;

import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.hbase.filter.RegexStringComparator;
import org.apache.hadoop.hbase.filter.WritableByteArrayComparable;
import org.apache.hadoop.hbase.util.Bytes;

/**
 * Utility class used to split (join) a column qualifier into (from) one or
 * more byte-array segments separated by "::". Because ":" separates family
 * from qualifier in a full column name, it cannot appear inside a family or
 * qualifier; the doubled colon keeps the segment separator distinct from the
 * family/qualifier separator.
 */
public class ColumnQualifiers {

  /** Separator placed between qualifier segments. */
  public static final String QUALIFIER_SEGMENT_SEPARATOR = "::";

  /**
   * Joins the given segments with the segment separator.
   *
   * @param segments segments to join, in order
   * @return the joined qualifier, or {@code null} for no segments
   */
  public static byte[] concatSegments(byte[]... segments) {
    byte[] qualifier = null;
    for (byte[] segment : segments) {
      qualifier = (qualifier == null) ? segment : Bytes.add(qualifier,
          Bytes.toBytes(QUALIFIER_SEGMENT_SEPARATOR), segment);
    }
    return qualifier;
  }

  /**
   * Splits a qualifier into its segments.
   *
   * @param qualifier the composite qualifier
   * @return the segments, in order
   */
  public static byte[][] extractSegments(byte[] qualifier) {
    // String.split takes a regex; "::" contains no metacharacters.
    String[] tokens = Bytes.toString(qualifier).split(
        QUALIFIER_SEGMENT_SEPARATOR);
    List<byte[]> segments = new ArrayList<byte[]>(tokens.length);
    for (String token : tokens) {
      segments.add(Bytes.toBytes(token));
    }
    return segments.toArray(new byte[segments.size()][]);
  }

  /**
   * Builds a regex comparator matching any qualifier that begins with the
   * given qualifier followed by the segment separator.
   */
  public static WritableByteArrayComparable getComparator(byte[] qualifier) {
    return new RegexStringComparator(Bytes.toString(concatSegments(qualifier,
        Bytes.toBytes(".*"))));
  }

  /** @return the first segment, or {@code null} if there are none. */
  public static byte[] firstSegment(byte[] qualifier) {
    byte[][] segments = extractSegments(qualifier);
    return segments != null && segments.length > 0 ? segments[0] : null;
  }

  /** @return the segment at {@code index}, or {@code null} if out of range. */
  public static byte[] getSegment(byte[] qualifier, int index) {
    byte[][] segments = extractSegments(qualifier);
    return (segments != null && segments.length > index) ? segments[index]
        : null;
  }

  /** @return the last segment, or {@code null} if there are none. */
  public static byte[] lastSegment(byte[] qualifier) {
    byte[][] segments = extractSegments(qualifier);
    return segments != null && segments.length > 0
        ? segments[segments.length - 1]
        : null;
  }

  /**
   * @return true if {@code segment} equals the first segment of the
   *         qualifier, compared by content.
   */
  public static boolean startsWith(byte[] qualifier, byte[] segment) {
    // FIX: byte[].equals() is reference equality and was almost always
    // false; compare array contents with Bytes.equals instead.
    return segment != null && Bytes.equals(segment, firstSegment(qualifier));
  }

  /**
   * @return true if {@code segment} equals the last segment of the
   *         qualifier, compared by content.
   */
  public static boolean endsWith(byte[] qualifier, byte[] segment) {
    // FIX: content comparison, not reference equality (see startsWith).
    return segment != null && Bytes.equals(segment, lastSegment(qualifier));
  }
}

// ===== src/main/java/org/apache/hadoop/hbase/client/ColumnSet.java =====
package org.apache.hadoop.hbase.client;

import java.util.Arrays;
import java.util.Collections;
import java.util.Set;
import java.util.TreeSet;

import org.apache.hadoop.hbase.util.Bytes;

/**
 * The ColumnSet implements the {@link Set} and {@link ColumnObject}
 * interfaces such that each element in the set is stored as the property name
 * (as well as its value), which ensures that concurrent, distributed updates
 * will not introduce duplicates into the set.
 */
public class ColumnSet extends TreeSet<byte[]> implements ColumnObject,
    Set<byte[]> {

  private static final long serialVersionUID = 899891722343011142L;

  /** Identifier of this column object; fixed at construction. */
  private final byte[] id;

  /**
   * @param id identifier under which this set is stored
   */
  public ColumnSet(byte[] id) {
    // Content-based ordering so contains()/remove() work for byte arrays.
    super(Bytes.BYTES_COMPARATOR);
    this.id = id;
  }

  @Override
  public byte[] getId() {
    return id;
  }

  /**
   * @return an unmodifiable snapshot of the elements, which double as the
   *         property names.
   */
  @Override
  public Set<byte[]> getPropertyNames() {
    // FIX: the original copied into a HashSet, whose identity-based hashing
    // breaks contains() for byte[]; keep content-based semantics instead.
    Set<byte[]> propertyNames = new TreeSet<byte[]>(Bytes.BYTES_COMPARATOR);
    propertyNames.addAll(this);
    return Collections.unmodifiableSet(propertyNames);
  }

  /**
   * For a set, a property's value is its own name.
   *
   * @return {@code propertyName} if it is a member, else {@code null}
   */
  @Override
  public byte[] getPropertyValue(byte[] propertyName) {
    return super.contains(propertyName) ? propertyName : null;
  }

  /** Builds a ColumnSet from any {@link ColumnObject}'s property names. */
  public static ColumnSet valueOf(ColumnObject columnObject) {
    ColumnSet columnSet = new ColumnSet(columnObject.getId());
    columnSet.addAll(columnObject.getPropertyNames());
    return columnSet;
  }

  /** Builds a ColumnSet with the given id from a plain set of elements. */
  public static ColumnSet valueOf(byte[] id, Set<byte[]> set) {
    ColumnSet columnSet = new ColumnSet(id);
    columnSet.addAll(set);
    return columnSet;
  }

  @Override
  public int hashCode() {
    final int prime = 31;
    int result = super.hashCode();
    result = prime * result + Arrays.hashCode(id);
    return result;
  }

  @Override
  public boolean equals(Object obj) {
    if (this == obj)
      return true;
    // FIX: guard against null before dereferencing obj.getClass().
    if (obj == null || getClass() != obj.getClass())
      return false;
    ColumnSet that = (ColumnSet) obj;
    if (!Bytes.equals(id, that.id))
      return false;
    return super.equals(that);
  }

  @Override
  public String toString() {
    StringBuilder buffer = new StringBuilder("{");
    boolean firstEntry = true;
    for (byte[] entry : this) {
      if (!firstEntry) {
        buffer.append(", ");
      }
      // FIX: the flag was only cleared inside the if-branch above, so the
      // ", " separator was never emitted; clear it unconditionally.
      firstEntry = false;
      buffer.append(Bytes.toString(entry));
    }
    buffer.append("}");
    return buffer.toString();
  }
}
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.client; + +import static org.junit.Assert.assertTrue; + +import java.util.List; + +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.Pair; + +public class TestColumnList extends TestColumnObject { + @Override + protected ColumnList getColumnObject() { + List columnList = new ColumnList(TEST_OBJECT_ID); + columnList.add(TEST_PROPERTY_VALUES[0]); + columnList.add(TEST_PROPERTY_VALUES[1]); + return (ColumnList) columnList; + } + + @Override + protected ColumnList valueOf(ColumnObject object) { + return ColumnList.valueOf(object); + } + + @Override + protected void assertPropertyExists(byte[] propertyName, byte[] propertyValue) { + assertTrue("The property gotten " + Bytes.toString(propertyValue) + + " did not match that which was put" + getColumnObject(), + getColumnObject().contains(propertyValue)); + } + + @Override + protected Pair getTestProperty() { + return new Pair(Bytes.toBytes(0), TEST_PROPERTY_VALUES[0]); + } + +} diff --git a/src/test/java/org/apache/hadoop/hbase/client/TestColumnMap.java b/src/test/java/org/apache/hadoop/hbase/client/TestColumnMap.java new file mode 100644 index 0000000..dd6678a --- /dev/null +++ b/src/test/java/org/apache/hadoop/hbase/client/TestColumnMap.java @@ -0,0 +1,48 @@ +/** + * Copyright 2009 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.client; + +import java.util.Map; +import org.apache.hadoop.hbase.util.Bytes; +import static org.junit.Assert.assertTrue; + +public class TestColumnMap extends TestColumnObject { + + @Override + protected ColumnMap getColumnObject() { + Map map = new ColumnMap(TEST_OBJECT_ID); + map.put(TEST_PROPERTY_NAMES[0], TEST_PROPERTY_VALUES[0]); + map.put(TEST_PROPERTY_NAMES[1], TEST_PROPERTY_VALUES[1]); + return (ColumnMap) map; + } + + @Override + protected ColumnMap valueOf(ColumnObject object) { + return ColumnMap.valueOf(object); + } + + @Override + protected void assertPropertyExists(byte[] propertyName, byte[] propertyValue) { + assertTrue("The property gotten did not match that which was put", + Bytes.equals(getColumnObject().getPropertyValue(propertyName), + propertyValue)); + + } +} diff --git a/src/test/java/org/apache/hadoop/hbase/client/TestColumnObject.java b/src/test/java/org/apache/hadoop/hbase/client/TestColumnObject.java new file mode 100644 index 0000000..6adef13 --- /dev/null +++ b/src/test/java/org/apache/hadoop/hbase/client/TestColumnObject.java @@ -0,0 +1,250 @@ +/** + * Copyright 2009 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.client; + +import static org.junit.Assert.assertEquals; + +import java.io.IOException; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.Pair; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; + +public abstract class TestColumnObject { + final Log LOG = LogFactory.getLog(getClass()); + protected final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); + + protected static final byte[] TEST_TABLE = Bytes.toBytes("testColumns"); + protected static final byte[] TEST_ROW = Bytes.toBytes("testRow"); + protected static final byte[] TEST_FAMILY = Bytes.toBytes("testFamily"); + protected static final byte[] TEST_QUALIFIER = Bytes.toBytes("testQualifier"); + protected static final byte[] TEST_OBJECT_ID = new byte[] { 0 }; + protected static final byte[][] TEST_PROPERTY_NAMES = new byte[][] { + new byte[] { 0 }, new byte[] { 1 } }; + protected static final byte[][] TEST_PROPERTY_VALUES = new byte[][] { + new byte[] { 2 }, new byte[] { 3 } }; + + protected HTableInterface table; + + /** + * @throws java.lang.Exception + */ + @BeforeClass + public 
static void setUpBeforeClass() throws Exception { + TEST_UTIL.startMiniCluster(3); + } + + /** + * @throws java.lang.Exception + */ + @AfterClass + public static void tearDownAfterClass() throws Exception { + TEST_UTIL.shutdownMiniCluster(); + } + + /** + * @throws java.lang.Exception + */ + @Before + public void setUp() throws Exception { + Configuration conf = TEST_UTIL.getConfiguration(); + this.table = TEST_UTIL.createTable(TEST_TABLE, + new byte[][] { TEST_FAMILY }, conf); + } + + @After + public void tearDown() throws Exception { + new HBaseAdmin(TEST_UTIL.getConfiguration()).disableTable(TEST_TABLE); + new HBaseAdmin(TEST_UTIL.getConfiguration()).deleteTable(TEST_TABLE); + } + + protected abstract ColumnType getColumnObject(); + + protected abstract ColumnType valueOf(ColumnObject columnObject); + + protected abstract void assertPropertyExists(byte[] propertyName, + byte[] propertyValue); + + protected Pair getTestProperty() { + return new Pair(TEST_PROPERTY_NAMES[0], + TEST_PROPERTY_VALUES[0]); + } + + @Test + public void testObjectGet() throws IOException { + final byte[] row = TEST_ROW; + + ColumnObjectGet get = new ColumnObjectGet(row); + + get.addObject(TEST_FAMILY, TEST_QUALIFIER, TEST_OBJECT_ID); + + ColumnObjectResult result = new ColumnObjectResult(table.get(get)); + ColumnType getObject = valueOf(result.getColumnObject(TEST_FAMILY, + TEST_OBJECT_ID)); + + assertEquals("A non-existent object has one or more properties", 0, + getObject.getPropertyNames().size()); + } + + @Test + public void testObjectPut() throws IOException { + final byte[] row = TEST_ROW; + + ColumnObjectPut put = new ColumnObjectPut( + TEST_ROW); + + ColumnType putObject = getColumnObject(); + + put.add(TEST_FAMILY, TEST_QUALIFIER, putObject); + + table.put(put); + + ColumnObjectGet get = new ColumnObjectGet(row); + + get.addObject(TEST_FAMILY, TEST_QUALIFIER, TEST_OBJECT_ID); + + ColumnObjectResult result = new ColumnObjectResult(table.get(get)); + ColumnType getObject = 
valueOf(result.getColumnObject(TEST_FAMILY, + TEST_OBJECT_ID)); + + assertEquals("The object gotten did not match that which was put", putObject, + getObject); + } + + @Test + public void testObjectDelete() throws IOException { + final byte[] row = TEST_ROW; + + ColumnObjectPut put = new ColumnObjectPut( + TEST_ROW); + + ColumnType putObject = getColumnObject(); + + put.add(TEST_FAMILY, TEST_QUALIFIER, putObject); + + table.put(put); + + ColumnObjectDelete delete = new ColumnObjectDelete( + row); + + delete.deleteObject(TEST_FAMILY, TEST_QUALIFIER, putObject); + + table.delete(delete); + + ColumnObjectGet get = new ColumnObjectGet(row); + + get.addObject(TEST_FAMILY, TEST_QUALIFIER, TEST_OBJECT_ID); + + ColumnObjectResult result = new ColumnObjectResult(table.get(get)); + ColumnType getObject = valueOf(result.getColumnObject(TEST_FAMILY, + TEST_OBJECT_ID)); + + assertEquals("The object that was deleted still has properties", 0, + getObject.getPropertyNames().size()); + } + + @Test + public void testPropertyGet() throws IOException { + final byte[] row = TEST_ROW; + + ColumnObjectGet get = new ColumnObjectGet(row); + + get.addProperty(TEST_FAMILY, TEST_QUALIFIER, TEST_OBJECT_ID, + TEST_PROPERTY_NAMES[0]); + + ColumnObjectResult result = new ColumnObjectResult(table.get(get)); + ColumnType getObject = valueOf(result.getColumnObject(TEST_FAMILY, + TEST_OBJECT_ID)); + + assertEquals("A non-existent object has one or more properties", 0, + getObject.getPropertyNames().size()); + } + + @Test + public void testPropertyPut() throws IOException { + final byte[] row = TEST_ROW; + + Pair property = getTestProperty(); + byte[] propertyName = property.getFirst(); + byte[] propertyValue = property.getSecond(); + + ColumnObjectPut put = new ColumnObjectPut( + TEST_ROW); + + put.add(TEST_FAMILY, TEST_QUALIFIER, TEST_OBJECT_ID, propertyName, + propertyValue); + + table.put(put); + + ColumnObjectGet get = new ColumnObjectGet(row); + + get.addProperty(TEST_FAMILY, TEST_QUALIFIER, 
TEST_OBJECT_ID, propertyName); + + ColumnObjectResult result = new ColumnObjectResult(table.get(get)); + ColumnType getObject = valueOf(result.getColumnObject(TEST_FAMILY, + TEST_OBJECT_ID)); + + assertPropertyExists(propertyName, getObject.getPropertyValue(propertyName)); + } + + @Test + public void testPropertyDelete() throws IOException { + final byte[] row = TEST_ROW; + + Pair property = getTestProperty(); + byte[] propertyName = property.getFirst(); + byte[] propertyValue = property.getSecond(); + + ColumnObjectPut put = new ColumnObjectPut( + TEST_ROW); + + put.add(TEST_FAMILY, TEST_QUALIFIER, TEST_OBJECT_ID, propertyName, + propertyValue); + + table.put(put); + + ColumnObjectDelete delete = new ColumnObjectDelete( + row); + + delete.deleteProperty(TEST_FAMILY, TEST_QUALIFIER, TEST_OBJECT_ID, + propertyName); + + table.delete(delete); + + ColumnObjectGet get = new ColumnObjectGet(row); + + get.addObject(TEST_FAMILY, TEST_QUALIFIER, TEST_OBJECT_ID); + + ColumnObjectResult result = new ColumnObjectResult(table.get(get)); + ColumnType getObject = valueOf(result.getColumnObject(TEST_FAMILY, + TEST_OBJECT_ID)); + + assertEquals("The object that was deleted still has properties", 0, + getObject.getPropertyNames().size()); + } +} diff --git a/src/test/java/org/apache/hadoop/hbase/client/TestColumnSet.java b/src/test/java/org/apache/hadoop/hbase/client/TestColumnSet.java new file mode 100644 index 0000000..31a1690 --- /dev/null +++ b/src/test/java/org/apache/hadoop/hbase/client/TestColumnSet.java @@ -0,0 +1,46 @@ +/** + * Copyright 2009 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.client; + +import static org.junit.Assert.assertTrue; + +import java.util.Set; + +public class TestColumnSet extends TestColumnObject { + @Override + protected ColumnSet getColumnObject() { + Set columnSet = new ColumnSet(TEST_OBJECT_ID); + columnSet.add(TEST_PROPERTY_NAMES[0]); + columnSet.add(TEST_PROPERTY_NAMES[1]); + return (ColumnSet) columnSet; + } + + @Override + protected ColumnSet valueOf(ColumnObject object) { + return ColumnSet.valueOf(object); + } + + @Override + protected void assertPropertyExists(byte[] propertyName, byte[] propertyValue) { + assertTrue("The property gotten did not match that which was put", + getColumnObject().contains(propertyName)); + + } +}