in
+ * @param in Where to read bytes from. Creates a byte array to hold the KeyValue
+ * backing bytes copied from the stream.
+ * @return KeyValue created by deserializing from in OR if we find a length
+ * of zero, we will return null which can be useful marking a stream as done.
* @throws IOException
*/
public static KeyValue create(final DataInput in) throws IOException {
@@ -2311,10 +2316,12 @@ public class KeyValue implements Cell, HeapSize {
* Create a KeyValue reading length from in
* @param length
* @param in
- * @return Created KeyValue
+ * @return Created KeyValue OR if we find a length of zero, we will return null which
+ * can be useful marking a stream as done.
* @throws IOException
*/
public static KeyValue create(int length, final DataInput in) throws IOException {
+ if (length == 0) return null;
// This is how the old Writables.readFrom used to deserialize. Didn't even vint.
byte [] bytes = new byte[length];
in.readFully(bytes);
@@ -2322,6 +2329,24 @@ public class KeyValue implements Cell, HeapSize {
}
/**
+ * Create a KeyValue reading from the raw InputStream.
+ * Named iscreate so doesn't clash with {@link #create(DataInput)}
+ * @param in
+ * @return Created KeyValue OR if we hit EOF at the start of the stream, we will return
+ * null, which can be useful marking a stream as done.
+ * @throws IOException
+ */
+ public static KeyValue iscreate(final InputStream in) throws IOException {
+ byte [] intBytes = new byte[Bytes.SIZEOF_INT];
+ int length = in.read(intBytes);
+ if (length == -1) return null; // InputStream.read returns -1 at EOF, not zero
+ if (length != intBytes.length) throw new IOException("Failed read of int length " + length);
+ byte [] bytes = new byte[Bytes.toInt(intBytes)];
+ IOUtils.readFully(in, bytes, 0, bytes.length);
+ return new KeyValue(bytes, 0, bytes.length);
+ }
+
+ /**
* Write out a KeyValue in the manner in which we used to when KeyValue was a Writable.
* @param kv
* @param out
@@ -2330,8 +2355,8 @@ public class KeyValue implements Cell, HeapSize {
* @see #create(DataInput) for the inverse function
*/
public static long write(final KeyValue kv, final DataOutput out) throws IOException {
- // This is how the old Writables write used to serialize KVs. Need to figure way to make it work for all
- // implementations.
+ // This is how the old Writables write used to serialize KVs. Need to figure way to make it
+ // work for all implementations.
int length = kv.getLength();
out.writeInt(length);
out.write(kv.getBuffer(), kv.getOffset(), length);
@@ -2339,6 +2364,25 @@ public class KeyValue implements Cell, HeapSize {
}
/**
+ * Write out a KeyValue in the manner in which we used to when KeyValue was a Writable but do
+ * not require a {@link DataOutput}, just take plain {@link OutputStream}
+ * Named oswrite so does not clash with {@link #write(KeyValue, DataOutput)}
+ * @param kv
+ * @param out
+ * @return Length written on stream
+ * @throws IOException
+ * @see #create(DataInput) for the inverse function
+ * @see #write(KeyValue, DataOutput)
+ */
+ public static long oswrite(final KeyValue kv, final OutputStream out) throws IOException {
+ int length = kv.getLength();
+ // This does the same as DataOutput#writeInt (big-endian, etc.)
+ out.write(Bytes.toBytes(length));
+ out.write(kv.getBuffer(), kv.getOffset(), length);
+ return length + Bytes.SIZEOF_INT;
+ }
+
+ /**
* Compare key portion of a {@link KeyValue} for keys in -ROOT-
* table.
*/
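
Taken together, write/oswrite and create/iscreate frame a KeyValue as a big-endian 4-byte length followed by the backing bytes, so the DataOutput and OutputStream paths are wire-compatible. A round-trip sketch under that assumption (class and variable names are illustrative, not part of the patch):

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.IOException;
    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.util.Bytes;

    public class OswriteRoundTrip {
      public static void main(String[] args) throws IOException {
        KeyValue kv = new KeyValue(Bytes.toBytes("row"), Bytes.toBytes("f"),
            Bytes.toBytes("q"), Bytes.toBytes("value"));
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        // oswrite returns the KeyValue length plus the 4-byte length prefix
        long written = KeyValue.oswrite(kv, out);
        assert written == kv.getLength() + Bytes.SIZEOF_INT;
        ByteArrayInputStream in = new ByteArrayInputStream(out.toByteArray());
        KeyValue copy = KeyValue.iscreate(in); // reads the prefix, then the bytes
        assert copy != null && copy.getLength() == kv.getLength();
        assert KeyValue.iscreate(in) == null;  // EOF marks the stream as done
      }
    }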
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueTestUtil.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueTestUtil.java
index c2052b0..8318522 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueTestUtil.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueTestUtil.java
@@ -26,7 +26,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.IterableUtils;
import org.apache.hadoop.hbase.util.Strings;
-import org.apache.hbase.cell.CellComparator;
+import org.apache.hbase.CellComparator;
import com.google.common.collect.Lists;
@@ -63,10 +63,10 @@ public class KeyValueTestUtil {
public static ByteBuffer toByteBufferAndRewind(final Iterable<? extends KeyValue> kvs,
boolean includeMemstoreTS) {
- int totalBytes = KeyValueTool.totalLengthWithMvccVersion(kvs, includeMemstoreTS);
+ int totalBytes = KeyValueUtil.totalLengthWithMvccVersion(kvs, includeMemstoreTS);
ByteBuffer bb = ByteBuffer.allocate(totalBytes);
for (KeyValue kv : IterableUtils.nullSafe(kvs)) {
- KeyValueTool.appendToByteBuffer(bb, kv, includeMemstoreTS);
+ KeyValueUtil.appendToByteBuffer(bb, kv, includeMemstoreTS);
}
bb.rewind();
return bb;
@@ -99,7 +99,7 @@ public class KeyValueTestUtil {
List<KeyValue> kvs = Lists.newArrayList();
KeyValue kv = null;
while (true) {
- kv = KeyValueTool.nextShallowCopy(bb, includesMemstoreTS);
+ kv = KeyValueUtil.nextShallowCopy(bb, includesMemstoreTS);
if (kv == null) {
break;
}
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueTool.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueTool.java
deleted file mode 100644
index e6297e2..0000000
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueTool.java
+++ /dev/null
@@ -1,198 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase;
-
-import java.nio.ByteBuffer;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.hbase.util.ByteBufferUtils;
-import org.apache.hadoop.hbase.util.ByteRange;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.IterableUtils;
-import org.apache.hadoop.io.WritableUtils;
-import org.apache.hbase.Cell;
-import org.apache.hbase.cell.CellTool;
-
-/**
- * static convenience methods for dealing with KeyValues and collections of KeyValues
- */
-@InterfaceAudience.Private
-public class KeyValueTool {
-
- /**************** length *********************/
-
- public static int length(final Cell cell) {
- return (int)KeyValue.getKeyValueDataStructureSize(cell.getRowLength(), cell.getFamilyLength(),
- cell.getQualifierLength(), cell.getValueLength());
- }
-
- protected static int keyLength(final Cell cell) {
- return (int)KeyValue.getKeyDataStructureSize(cell.getRowLength(), cell.getFamilyLength(),
- cell.getQualifierLength());
- }
-
- public static int lengthWithMvccVersion(final KeyValue kv, final boolean includeMvccVersion) {
- int length = kv.getLength();
- if (includeMvccVersion) {
- length += WritableUtils.getVIntSize(kv.getMvccVersion());
- }
- return length;
- }
-
- public static int totalLengthWithMvccVersion(final Iterable<? extends KeyValue> kvs,
- final boolean includeMvccVersion) {
- int length = 0;
- for (KeyValue kv : IterableUtils.nullSafe(kvs)) {
- length += lengthWithMvccVersion(kv, includeMvccVersion);
- }
- return length;
- }
-
-
- /**************** copy key only *********************/
-
- public static KeyValue copyToNewKeyValue(final Cell cell) {
- KeyValue kvCell = new KeyValue(copyToNewByteArray(cell));
- kvCell.setMvccVersion(cell.getMvccVersion());
- return kvCell;
- }
-
- public static ByteBuffer copyKeyToNewByteBuffer(final Cell cell) {
- byte[] bytes = new byte[keyLength(cell)];
- appendKeyToByteArrayWithoutValue(cell, bytes, 0);
- ByteBuffer buffer = ByteBuffer.wrap(bytes);
- buffer.position(buffer.limit());//make it look as if each field were appended
- return buffer;
- }
-
- public static byte[] copyToNewByteArray(final Cell cell) {
- int v1Length = length(cell);
- byte[] backingBytes = new byte[v1Length];
- appendToByteArray(cell, backingBytes, 0);
- return backingBytes;
- }
-
- protected static int appendKeyToByteArrayWithoutValue(final Cell cell, final byte[] output,
- final int offset) {
- int nextOffset = offset;
- nextOffset = Bytes.putShort(output, nextOffset, cell.getRowLength());
- nextOffset = CellTool.copyRowTo(cell, output, nextOffset);
- nextOffset = Bytes.putByte(output, nextOffset, cell.getFamilyLength());
- nextOffset = CellTool.copyFamilyTo(cell, output, nextOffset);
- nextOffset = CellTool.copyQualifierTo(cell, output, nextOffset);
- nextOffset = Bytes.putLong(output, nextOffset, cell.getTimestamp());
- nextOffset = Bytes.putByte(output, nextOffset, cell.getTypeByte());
- return nextOffset;
- }
-
-
- /**************** copy key and value *********************/
-
- public static int appendToByteArray(final Cell cell, final byte[] output, final int offset) {
- int pos = offset;
- pos = Bytes.putInt(output, pos, keyLength(cell));
- pos = Bytes.putInt(output, pos, cell.getValueLength());
- pos = appendKeyToByteArrayWithoutValue(cell, output, pos);
- CellTool.copyValueTo(cell, output, pos);
- return pos + cell.getValueLength();
- }
-
- public static ByteBuffer copyToNewByteBuffer(final Cell cell) {
- byte[] bytes = new byte[length(cell)];
- appendToByteArray(cell, bytes, 0);
- ByteBuffer buffer = ByteBuffer.wrap(bytes);
- buffer.position(buffer.limit());//make it look as if each field were appended
- return buffer;
- }
-
- public static void appendToByteBuffer(final ByteBuffer bb, final KeyValue kv,
- final boolean includeMvccVersion) {
- // keep pushing the limit out. assume enough capacity
- bb.limit(bb.position() + kv.getLength());
- bb.put(kv.getBuffer(), kv.getOffset(), kv.getLength());
- if (includeMvccVersion) {
- int numMvccVersionBytes = WritableUtils.getVIntSize(kv.getMvccVersion());
- ByteBufferUtils.extendLimit(bb, numMvccVersionBytes);
- ByteBufferUtils.writeVLong(bb, kv.getMvccVersion());
- }
- }
-
-
- /**************** iterating *******************************/
-
- /**
- * Creates a new KeyValue object positioned in the supplied ByteBuffer and sets the ByteBuffer's
- * position to the start of the next KeyValue. Does not allocate a new array or copy data.
- */
- public static KeyValue nextShallowCopy(final ByteBuffer bb, final boolean includesMvccVersion) {
- if (bb.isDirect()) {
- throw new IllegalArgumentException("only supports heap buffers");
- }
- if (bb.remaining() < 1) {
- return null;
- }
- int underlyingArrayOffset = bb.arrayOffset() + bb.position();
- int keyLength = bb.getInt();
- int valueLength = bb.getInt();
- int kvLength = KeyValue.KEYVALUE_INFRASTRUCTURE_SIZE + keyLength + valueLength;
- KeyValue keyValue = new KeyValue(bb.array(), underlyingArrayOffset, kvLength);
- ByteBufferUtils.skip(bb, keyLength + valueLength);
- if (includesMvccVersion) {
- long mvccVersion = ByteBufferUtils.readVLong(bb);
- keyValue.setMvccVersion(mvccVersion);
- }
- return keyValue;
- }
-
-
- /*************** next/previous **********************************/
-
- /**
- * Append single byte 0x00 to the end of the input row key
- */
- public static KeyValue createFirstKeyInNextRow(final Cell in){
- byte[] nextRow = new byte[in.getRowLength() + 1];
- System.arraycopy(in.getRowArray(), in.getRowOffset(), nextRow, 0, in.getRowLength());
- nextRow[nextRow.length - 1] = 0;//maybe not necessary
- return KeyValue.createFirstOnRow(nextRow);
- }
-
- /**
- * Increment the row bytes and clear the other fields
- */
- public static KeyValue createFirstKeyInIncrementedRow(final Cell in){
- byte[] thisRow = new ByteRange(in.getRowArray(), in.getRowOffset(), in.getRowLength())
- .deepCopyToNewArray();
- byte[] nextRow = Bytes.unsignedCopyAndIncrement(thisRow);
- return KeyValue.createFirstOnRow(nextRow);
- }
-
- /**
- * Decrement the timestamp. For tests (currently wasteful)
- *
- * Remember timestamps are sorted reverse chronologically.
- * @param in
- * @return previous key
- */
- public static KeyValue previousKey(final KeyValue in) {
- return KeyValue.createFirstOnRow(CellTool.getRowArray(in), CellTool.getFamilyArray(in),
- CellTool.getQualifierArray(in), in.getTimestamp() - 1);
- }
-}
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueUtil.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueUtil.java
new file mode 100644
index 0000000..75a4b55
--- /dev/null
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueUtil.java
@@ -0,0 +1,207 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase;
+
+import java.nio.ByteBuffer;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.util.ByteBufferUtils;
+import org.apache.hadoop.hbase.util.ByteRange;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.IterableUtils;
+import org.apache.hadoop.io.WritableUtils;
+import org.apache.hbase.Cell;
+import org.apache.hbase.CellUtil;
+
+/**
+ * static convenience methods for dealing with KeyValues and collections of KeyValues
+ */
+@InterfaceAudience.Private
+public class KeyValueUtil {
+
+ /**************** length *********************/
+
+ public static int length(final Cell cell) {
+ return (int)KeyValue.getKeyValueDataStructureSize(cell.getRowLength(), cell.getFamilyLength(),
+ cell.getQualifierLength(), cell.getValueLength());
+ }
+
+ protected static int keyLength(final Cell cell) {
+ return (int)KeyValue.getKeyDataStructureSize(cell.getRowLength(), cell.getFamilyLength(),
+ cell.getQualifierLength());
+ }
+
+ public static int lengthWithMvccVersion(final KeyValue kv, final boolean includeMvccVersion) {
+ int length = kv.getLength();
+ if (includeMvccVersion) {
+ length += WritableUtils.getVIntSize(kv.getMvccVersion());
+ }
+ return length;
+ }
+
+ public static int totalLengthWithMvccVersion(final Iterable<? extends KeyValue> kvs,
+ final boolean includeMvccVersion) {
+ int length = 0;
+ for (KeyValue kv : IterableUtils.nullSafe(kvs)) {
+ length += lengthWithMvccVersion(kv, includeMvccVersion);
+ }
+ return length;
+ }
+
+
+ /**************** copy key only *********************/
+
+ public static KeyValue copyToNewKeyValue(final Cell cell) {
+ KeyValue kvCell = new KeyValue(copyToNewByteArray(cell));
+ kvCell.setMvccVersion(cell.getMvccVersion());
+ return kvCell;
+ }
+
+ public static ByteBuffer copyKeyToNewByteBuffer(final Cell cell) {
+ byte[] bytes = new byte[keyLength(cell)];
+ appendKeyToByteArrayWithoutValue(cell, bytes, 0);
+ ByteBuffer buffer = ByteBuffer.wrap(bytes);
+ buffer.position(buffer.limit());//make it look as if each field were appended
+ return buffer;
+ }
+
+ public static byte[] copyToNewByteArray(final Cell cell) {
+ int v1Length = length(cell);
+ byte[] backingBytes = new byte[v1Length];
+ appendToByteArray(cell, backingBytes, 0);
+ return backingBytes;
+ }
+
+ protected static int appendKeyToByteArrayWithoutValue(final Cell cell, final byte[] output,
+ final int offset) {
+ int nextOffset = offset;
+ nextOffset = Bytes.putShort(output, nextOffset, cell.getRowLength());
+ nextOffset = CellUtil.copyRowTo(cell, output, nextOffset);
+ nextOffset = Bytes.putByte(output, nextOffset, cell.getFamilyLength());
+ nextOffset = CellUtil.copyFamilyTo(cell, output, nextOffset);
+ nextOffset = CellUtil.copyQualifierTo(cell, output, nextOffset);
+ nextOffset = Bytes.putLong(output, nextOffset, cell.getTimestamp());
+ nextOffset = Bytes.putByte(output, nextOffset, cell.getTypeByte());
+ return nextOffset;
+ }
+
+
+ /**************** copy key and value *********************/
+
+ public static int appendToByteArray(final Cell cell, final byte[] output, final int offset) {
+ int pos = offset;
+ pos = Bytes.putInt(output, pos, keyLength(cell));
+ pos = Bytes.putInt(output, pos, cell.getValueLength());
+ pos = appendKeyToByteArrayWithoutValue(cell, output, pos);
+ CellUtil.copyValueTo(cell, output, pos);
+ return pos + cell.getValueLength();
+ }
+
+ public static ByteBuffer copyToNewByteBuffer(final Cell cell) {
+ byte[] bytes = new byte[length(cell)];
+ appendToByteArray(cell, bytes, 0);
+ ByteBuffer buffer = ByteBuffer.wrap(bytes);
+ buffer.position(buffer.limit());//make it look as if each field were appended
+ return buffer;
+ }
+
+ public static void appendToByteBuffer(final ByteBuffer bb, final KeyValue kv,
+ final boolean includeMvccVersion) {
+ // keep pushing the limit out. assume enough capacity
+ bb.limit(bb.position() + kv.getLength());
+ bb.put(kv.getBuffer(), kv.getOffset(), kv.getLength());
+ if (includeMvccVersion) {
+ int numMvccVersionBytes = WritableUtils.getVIntSize(kv.getMvccVersion());
+ ByteBufferUtils.extendLimit(bb, numMvccVersionBytes);
+ ByteBufferUtils.writeVLong(bb, kv.getMvccVersion());
+ }
+ }
+
+
+ /**************** iterating *******************************/
+
+ /**
+ * Creates a new KeyValue object positioned in the supplied ByteBuffer and sets the ByteBuffer's
+ * position to the start of the next KeyValue. Does not allocate a new array or copy data.
+ */
+ public static KeyValue nextShallowCopy(final ByteBuffer bb, final boolean includesMvccVersion) {
+ if (bb.isDirect()) {
+ throw new IllegalArgumentException("only supports heap buffers");
+ }
+ if (bb.remaining() < 1) {
+ return null;
+ }
+ int underlyingArrayOffset = bb.arrayOffset() + bb.position();
+ int keyLength = bb.getInt();
+ int valueLength = bb.getInt();
+ int kvLength = KeyValue.KEYVALUE_INFRASTRUCTURE_SIZE + keyLength + valueLength;
+ KeyValue keyValue = new KeyValue(bb.array(), underlyingArrayOffset, kvLength);
+ ByteBufferUtils.skip(bb, keyLength + valueLength);
+ if (includesMvccVersion) {
+ long mvccVersion = ByteBufferUtils.readVLong(bb);
+ keyValue.setMvccVersion(mvccVersion);
+ }
+ return keyValue;
+ }
+
+
+ /*************** next/previous **********************************/
+
+ /**
+ * Append single byte 0x00 to the end of the input row key
+ */
+ public static KeyValue createFirstKeyInNextRow(final Cell in){
+ byte[] nextRow = new byte[in.getRowLength() + 1];
+ System.arraycopy(in.getRowArray(), in.getRowOffset(), nextRow, 0, in.getRowLength());
+ nextRow[nextRow.length - 1] = 0;//maybe not necessary
+ return KeyValue.createFirstOnRow(nextRow);
+ }
+
+ /**
+ * Increment the row bytes and clear the other fields
+ */
+ public static KeyValue createFirstKeyInIncrementedRow(final Cell in){
+ byte[] thisRow = new ByteRange(in.getRowArray(), in.getRowOffset(), in.getRowLength())
+ .deepCopyToNewArray();
+ byte[] nextRow = Bytes.unsignedCopyAndIncrement(thisRow);
+ return KeyValue.createFirstOnRow(nextRow);
+ }
+
+ /**
+ * Decrement the timestamp. For tests (currently wasteful)
+ *
+ * Remember timestamps are sorted reverse chronologically.
+ * @param in
+ * @return previous key
+ */
+ public static KeyValue previousKey(final KeyValue in) {
+ return KeyValue.createFirstOnRow(CellUtil.getRowArray(in), CellUtil.getFamilyArray(in),
+ CellUtil.getQualifierArray(in), in.getTimestamp() - 1);
+ }
+
+ /*************** misc **********************************/
+ /**
+ * @param cell
+ * @return cell if it is an instance of {@link KeyValue} else we will return a
+ * new {@link KeyValue} instance made from cell
+ */
+ public static Cell ensureKeyValue(final Cell cell) {
+ return cell instanceof KeyValue? cell: copyToNewKeyValue(cell);
+ }
+}
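
KeyValueUtil.appendToByteBuffer and nextShallowCopy are inverses over a heap ByteBuffer, which is exactly how KeyValueTestUtil above uses them. A small sketch of that flow, assuming only the classes in this patch (names are illustrative):

    import java.nio.ByteBuffer;
    import java.util.Arrays;
    import java.util.List;
    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.KeyValueUtil;
    import org.apache.hadoop.hbase.util.Bytes;
    import org.apache.hbase.CellUtil;

    public class ShallowCopyScan {
      public static void main(String[] args) {
        List<KeyValue> kvs = Arrays.asList(
            new KeyValue(Bytes.toBytes("r1"), Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v1")),
            new KeyValue(Bytes.toBytes("r2"), Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v2")));
        ByteBuffer bb = ByteBuffer.allocate(KeyValueUtil.totalLengthWithMvccVersion(kvs, false));
        for (KeyValue kv : kvs) {
          KeyValueUtil.appendToByteBuffer(bb, kv, false);
        }
        bb.rewind();
        KeyValue kv;
        // each shallow copy is backed by bb's array; nothing is copied
        while ((kv = KeyValueUtil.nextShallowCopy(bb, false)) != null) {
          System.out.println(Bytes.toString(CellUtil.getRowArray(kv)));
        }
      }
    }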
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java
index e9ed852..58f33ea 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java
@@ -628,7 +628,9 @@ public class Bytes {
}
/**
- * Convert an int value to a byte array
+ * Convert an int value to a byte array. Big-endian. Same as what DataOutputStream.writeInt
+ * does.
+ *
* @param val value
* @return the byte array
*/
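
The claim is easy to verify: Bytes.toBytes(int) and DataOutputStream.writeInt agree byte-for-byte, which is what lets KeyValue.oswrite above skip the DataOutput wrapper. A quick sketch (class name illustrative):

    import java.io.ByteArrayOutputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;
    import java.util.Arrays;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BigEndianCheck {
      public static void main(String[] args) throws IOException {
        int val = 0x01020304;
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        new DataOutputStream(baos).writeInt(val);
        // both are big-endian: {0x01, 0x02, 0x03, 0x04}
        assert Arrays.equals(baos.toByteArray(), Bytes.toBytes(val));
      }
    }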
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/test/LoadTestKVGenerator.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/test/LoadTestKVGenerator.java
index 5a64a1b..6a30ca3 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/test/LoadTestKVGenerator.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/test/LoadTestKVGenerator.java
@@ -16,7 +16,6 @@
*/
package org.apache.hadoop.hbase.util.test;
-import java.util.Map;
import java.util.Random;
import org.apache.hadoop.hbase.util.Bytes;
diff --git a/hbase-common/src/main/java/org/apache/hbase/Cell.java b/hbase-common/src/main/java/org/apache/hbase/Cell.java
index 044b287..3dc7941 100644
--- a/hbase-common/src/main/java/org/apache/hbase/Cell.java
+++ b/hbase-common/src/main/java/org/apache/hbase/Cell.java
@@ -20,7 +20,6 @@ package org.apache.hbase;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hbase.cell.CellTool;
/**
@@ -43,7 +42,7 @@ import org.apache.hbase.cell.CellTool;
* the goal of sorting newer cells first.
*
* This interface does not include methods that allocate new byte[]'s such as those used in client
- * or debugging code. These should be placed in a sub-interface or the {@link CellTool} class.
+ * or debugging code. These should be placed in a sub-interface or the {@link CellUtil} class.
*
* Cell implements Comparable which is only meaningful when comparing to other keys in the
* same table. It uses CellComparator which does not work on the -ROOT- and .META. tables.
diff --git a/hbase-common/src/main/java/org/apache/hbase/CellComparator.java b/hbase-common/src/main/java/org/apache/hbase/CellComparator.java
new file mode 100644
index 0000000..d2d20fd
--- /dev/null
+++ b/hbase-common/src/main/java/org/apache/hbase/CellComparator.java
@@ -0,0 +1,198 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hbase;
+
+import java.io.Serializable;
+import java.util.Comparator;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hbase.util.Bytes;
+
+import com.google.common.primitives.Longs;
+
+/**
+ * Compare two HBase cells. Do not use this comparator when comparing -ROOT- or
+ * .META. cells. Cells from these tables need a specialized comparator, one that
+ * takes account of the special formatting of the row where we have commas to delimit table from
+ * regionname, from row. See KeyValue for how it has a special comparator to do .META. cells
+ * and yet another for -ROOT-.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class CellComparator implements Comparator<Cell>, Serializable {
+ private static final long serialVersionUID = -8760041766259623329L;
+
+ @Override
+ public int compare(Cell a, Cell b) {
+ return compareStatic(a, b);
+ }
+
+
+ public static int compareStatic(Cell a, Cell b) {
+ //row
+ int c = Bytes.compareTo(
+ a.getRowArray(), a.getRowOffset(), a.getRowLength(),
+ b.getRowArray(), b.getRowOffset(), b.getRowLength());
+ if (c != 0) return c;
+
+ //family
+ c = Bytes.compareTo(
+ a.getFamilyArray(), a.getFamilyOffset(), a.getFamilyLength(),
+ b.getFamilyArray(), b.getFamilyOffset(), b.getFamilyLength());
+ if (c != 0) return c;
+
+ //qualifier
+ c = Bytes.compareTo(
+ a.getQualifierArray(), a.getQualifierOffset(), a.getQualifierLength(),
+ b.getQualifierArray(), b.getQualifierOffset(), b.getQualifierLength());
+ if (c != 0) return c;
+
+ //timestamp: later sorts first
+ c = -Longs.compare(a.getTimestamp(), b.getTimestamp());
+ if (c != 0) return c;
+
+ //type
+ c = (0xff & a.getTypeByte()) - (0xff & b.getTypeByte());
+ if (c != 0) return c;
+
+ //mvccVersion: later sorts first
+ return -Longs.compare(a.getMvccVersion(), b.getMvccVersion());
+ }
+
+
+ /**************** equals ****************************/
+
+ public static boolean equals(Cell a, Cell b){
+ return equalsRow(a, b)
+ && equalsFamily(a, b)
+ && equalsQualifier(a, b)
+ && equalsTimestamp(a, b)
+ && equalsType(a, b);
+ }
+
+ public static boolean equalsRow(Cell a, Cell b){
+ return Bytes.equals(
+ a.getRowArray(), a.getRowOffset(), a.getRowLength(),
+ b.getRowArray(), b.getRowOffset(), b.getRowLength());
+ }
+
+ public static boolean equalsFamily(Cell a, Cell b){
+ return Bytes.equals(
+ a.getFamilyArray(), a.getFamilyOffset(), a.getFamilyLength(),
+ b.getFamilyArray(), b.getFamilyOffset(), b.getFamilyLength());
+ }
+
+ public static boolean equalsQualifier(Cell a, Cell b){
+ return Bytes.equals(
+ a.getQualifierArray(), a.getQualifierOffset(), a.getQualifierLength(),
+ b.getQualifierArray(), b.getQualifierOffset(), b.getQualifierLength());
+ }
+
+ public static boolean equalsTimestamp(Cell a, Cell b){
+ return a.getTimestamp() == b.getTimestamp();
+ }
+
+ public static boolean equalsType(Cell a, Cell b){
+ return a.getTypeByte() == b.getTypeByte();
+ }
+
+
+ /********************* hashCode ************************/
+
+ /**
+ * Returns a hash code that is always the same for two Cells having a matching equals(..) result.
+ * Currently does not guard against nulls, but it could if necessary.
+ */
+ public static int hashCode(Cell cell){
+ if (cell == null) {// return 0 for empty Cell
+ return 0;
+ }
+
+ //pre-calculate the 3 hashes made of byte ranges
+ int rowHash = Bytes.hashCode(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength());
+ int familyHash =
+ Bytes.hashCode(cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength());
+ int qualifierHash =
+ Bytes.hashCode(cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength());
+
+ //combine the 6 sub-hashes
+ int hash = 31 * rowHash + familyHash;
+ hash = 31 * hash + qualifierHash;
+ hash = 31 * hash + (int)cell.getTimestamp();
+ hash = 31 * hash + cell.getTypeByte();
+ hash = 31 * hash + (int)cell.getMvccVersion();
+ return hash;
+ }
+
+
+ /******************** lengths *************************/
+
+ public static boolean areKeyLengthsEqual(Cell a, Cell b) {
+ return a.getRowLength() == b.getRowLength()
+ && a.getFamilyLength() == b.getFamilyLength()
+ && a.getQualifierLength() == b.getQualifierLength();
+ }
+
+ public static boolean areRowLengthsEqual(Cell a, Cell b) {
+ return a.getRowLength() == b.getRowLength();
+ }
+
+
+ /***************** special cases ****************************/
+
+ /**
+ * special case for KeyValue.equals
+ */
+ private static int compareStaticIgnoreMvccVersion(Cell a, Cell b) {
+ //row
+ int c = Bytes.compareTo(
+ a.getRowArray(), a.getRowOffset(), a.getRowLength(),
+ b.getRowArray(), b.getRowOffset(), b.getRowLength());
+ if (c != 0) return c;
+
+ //family
+ c = Bytes.compareTo(
+ a.getFamilyArray(), a.getFamilyOffset(), a.getFamilyLength(),
+ b.getFamilyArray(), b.getFamilyOffset(), b.getFamilyLength());
+ if (c != 0) return c;
+
+ //qualifier
+ c = Bytes.compareTo(
+ a.getQualifierArray(), a.getQualifierOffset(), a.getQualifierLength(),
+ b.getQualifierArray(), b.getQualifierOffset(), b.getQualifierLength());
+ if (c != 0) return c;
+
+ //timestamp: later sorts first
+ c = -Longs.compare(a.getTimestamp(), b.getTimestamp());
+ if (c != 0) return c;
+
+ //type
+ c = (0xff & a.getTypeByte()) - (0xff & b.getTypeByte());
+ return c;
+ }
+
+ /**
+ * special case for KeyValue.equals
+ */
+ public static boolean equalsIgnoreMvccVersion(Cell a, Cell b){
+ return 0 == compareStaticIgnoreMvccVersion(a, b);
+ }
+
+}
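
Because KeyValue implements the new Cell interface (see the KeyValue hunk above), this comparator can order any Cell implementation. A usage sketch (class name illustrative):

    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.List;
    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.util.Bytes;
    import org.apache.hbase.Cell;
    import org.apache.hbase.CellComparator;

    public class CellSort {
      public static void main(String[] args) {
        List<Cell> cells = new ArrayList<Cell>();
        cells.add(new KeyValue(Bytes.toBytes("b"), Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v")));
        cells.add(new KeyValue(Bytes.toBytes("a"), Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v")));
        Collections.sort(cells, new CellComparator()); // row "a" now sorts first
        assert CellComparator.equalsRow(cells.get(0),
            new KeyValue(Bytes.toBytes("a"), Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v")));
      }
    }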
diff --git a/hbase-common/src/main/java/org/apache/hbase/CellScannable.java b/hbase-common/src/main/java/org/apache/hbase/CellScannable.java
new file mode 100644
index 0000000..deefaab
--- /dev/null
+++ b/hbase-common/src/main/java/org/apache/hbase/CellScannable.java
@@ -0,0 +1,31 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hbase;
+
+/**
+ * Implementer can return a CellScanner over its Cell content.
+ * Class name is ugly but mimicking java.util.Iterable, only ours is about the dumber
+ * CellScanner rather than say Iterator<Cell>. See the CellScanner class comment for why we go
+ * dumber than java.util.Iterator.
+ */
+public interface CellScannable {
+ /**
+ * @return A CellScanner over the contained {@link Cell}s
+ */
+ CellScanner cellScanner();
+}
\ No newline at end of file
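
A minimal sketch of an implementer, delegating to one of the CellUtil.createCellScanner overloads added later in this patch (the CellList class is hypothetical):

    import java.util.List;
    import org.apache.hbase.Cell;
    import org.apache.hbase.CellScannable;
    import org.apache.hbase.CellScanner;
    import org.apache.hbase.CellUtil;

    // holds Cells in a List but hands them out through the dumber CellScanner
    public class CellList implements CellScannable {
      private final List<Cell> cells;

      public CellList(List<Cell> cells) {
        this.cells = cells;
      }

      @Override
      public CellScanner cellScanner() {
        return CellUtil.createCellScanner(cells); // Iterable<Cell> overload
      }
    }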
diff --git a/hbase-common/src/main/java/org/apache/hbase/CellScanner.java b/hbase-common/src/main/java/org/apache/hbase/CellScanner.java
new file mode 100644
index 0000000..224bed5
--- /dev/null
+++ b/hbase-common/src/main/java/org/apache/hbase/CellScanner.java
@@ -0,0 +1,61 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hbase;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hbase.Cell;
+
+/**
+ * An interface for iterating through a sequence of cells. Similar to Java's Iterator, but without
+ * the hasNext() or remove() methods. The hasNext() method is problematic because it may require
+ * actually loading the next object, which in turn requires storing the previous object somewhere.
+ *
+ * The core data block decoder should be as fast as possible, so we push the complexity and
+ * performance expense of concurrently tracking multiple cells to layers above the CellScanner.
+ *
+ * The {@link #current()} method will return a reference to a Cell implementation. This reference may
+ * or may not point to a reusable cell implementation, so users of the CellScanner should not, for
+ * example, accumulate a List of Cells. All of the references may point to the same object, which
+ * would be the latest state of the underlying Cell. In short, the Cell is mutable.
+ *
+ * Typical usage:
+ *
+ * <pre>
+ * while (scanner.advance()) {
+ *   Cell cell = scanner.current();
+ *   // do something
+ * }
+ * </pre>
+ * Often used reading {@link org.apache.hbase.Cell}s written by {@link org.apache.hbase.io.CellOutputStream}.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public interface CellScanner {
+ /**
+ * @return the current Cell which may be mutable
+ */
+ Cell current();
+
+ /**
+ * Advance the scanner 1 cell.
+ * @return true if the next cell is found and {@link #current()} will return a valid Cell
+ */
+ boolean advance();
+}
diff --git a/hbase-common/src/main/java/org/apache/hbase/CellUtil.java b/hbase-common/src/main/java/org/apache/hbase/CellUtil.java
new file mode 100644
index 0000000..4b444ab
--- /dev/null
+++ b/hbase-common/src/main/java/org/apache/hbase/CellUtil.java
@@ -0,0 +1,243 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hbase;
+
+import java.nio.ByteBuffer;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map.Entry;
+import java.util.NavigableMap;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hbase.util.ByteRange;
+import org.apache.hadoop.hbase.KeyValue;
+
+/**
+ * Utility methods helpful slinging {@link Cell} instances.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public final class CellUtil {
+
+ /******************* ByteRange *******************************/
+
+ public static ByteRange fillRowRange(Cell cell, ByteRange range) {
+ return range.set(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength());
+ }
+
+ public static ByteRange fillFamilyRange(Cell cell, ByteRange range) {
+ return range.set(cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength());
+ }
+
+ public static ByteRange fillQualifierRange(Cell cell, ByteRange range) {
+ return range.set(cell.getQualifierArray(), cell.getQualifierOffset(),
+ cell.getQualifierLength());
+ }
+
+
+ /***************** get individual arrays for tests ************/
+
+ public static byte[] getRowArray(Cell cell){
+ byte[] output = new byte[cell.getRowLength()];
+ copyRowTo(cell, output, 0);
+ return output;
+ }
+
+ public static byte[] getFamilyArray(Cell cell){
+ byte[] output = new byte[cell.getFamilyLength()];
+ copyFamilyTo(cell, output, 0);
+ return output;
+ }
+
+ public static byte[] getQualifierArray(Cell cell){
+ byte[] output = new byte[cell.getQualifierLength()];
+ copyQualifierTo(cell, output, 0);
+ return output;
+ }
+
+ public static byte[] getValueArray(Cell cell){
+ byte[] output = new byte[cell.getValueLength()];
+ copyValueTo(cell, output, 0);
+ return output;
+ }
+
+
+ /******************** copyTo **********************************/
+
+ public static int copyRowTo(Cell cell, byte[] destination, int destinationOffset) {
+ System.arraycopy(cell.getRowArray(), cell.getRowOffset(), destination, destinationOffset,
+ cell.getRowLength());
+ return destinationOffset + cell.getRowLength();
+ }
+
+ public static int copyFamilyTo(Cell cell, byte[] destination, int destinationOffset) {
+ System.arraycopy(cell.getFamilyArray(), cell.getFamilyOffset(), destination, destinationOffset,
+ cell.getFamilyLength());
+ return destinationOffset + cell.getFamilyLength();
+ }
+
+ public static int copyQualifierTo(Cell cell, byte[] destination, int destinationOffset) {
+ System.arraycopy(cell.getQualifierArray(), cell.getQualifierOffset(), destination,
+ destinationOffset, cell.getQualifierLength());
+ return destinationOffset + cell.getQualifierLength();
+ }
+
+ public static int copyValueTo(Cell cell, byte[] destination, int destinationOffset) {
+ System.arraycopy(cell.getValueArray(), cell.getValueOffset(), destination, destinationOffset,
+ cell.getValueLength());
+ return destinationOffset + cell.getValueLength();
+ }
+
+
+ /********************* misc *************************************/
+
+ public static byte getRowByte(Cell cell, int index) {
+ return cell.getRowArray()[cell.getRowOffset() + index];
+ }
+
+ public static ByteBuffer getValueBufferShallowCopy(Cell cell) {
+ ByteBuffer buffer = ByteBuffer.wrap(cell.getValueArray(), cell.getValueOffset(),
+ cell.getValueLength());
+// buffer.position(buffer.limit());//make it look as if value was appended
+ return buffer;
+ }
+
+ public static Cell createCell(final byte [] row, final byte [] family, final byte [] qualifier,
+ final long timestamp, final byte type, final byte [] value) {
+ // I need a Cell Factory here. Using KeyValue for now. TODO.
+ // TODO: Make a new Cell implementation that just carries these
+ // byte arrays.
+ return new KeyValue(row, family, qualifier, timestamp,
+ KeyValue.Type.codeToType(type), value);
+ }
+
+ /**
+ * @param cellScannerables
+ * @return CellScanner interface over cellScannerables
+ */
+ public static CellScanner createCellScanner(final List<CellScannable> cellScannerables) {
+ return new CellScanner() {
+ private final Iterator<CellScannable> iterator = cellScannerables.iterator();
+ private CellScanner cellScanner = null;
+
+ @Override
+ public Cell current() {
+ return this.cellScanner != null? this.cellScanner.current(): null;
+ }
+
+ @Override
+ public boolean advance() {
+ if (this.cellScanner == null) {
+ if (!this.iterator.hasNext()) return false;
+ this.cellScanner = this.iterator.next().cellScanner();
+ }
+ if (this.cellScanner.advance()) return true;
+ this.cellScanner = null;
+ return advance();
+ }
+ };
+ }
+
+ /**
+ * @param cellIterable
+ * @return CellScanner interface over cellIterable
+ */
+ public static CellScanner createCellScanner(final Iterable<Cell> cellIterable) {
+ return createCellScanner(cellIterable.iterator());
+ }
+
+ /**
+ * @param cells
+ * @return CellScanner interface over cells
+ */
+ public static CellScanner createCellScanner(final Iterator<Cell> cells) {
+ return new CellScanner() {
+ private final Iterator<Cell> iterator = cells;
+ private Cell current = null;
+
+ @Override
+ public Cell current() {
+ return this.current;
+ }
+
+ @Override
+ public boolean advance() {
+ boolean hasNext = this.iterator.hasNext();
+ this.current = hasNext? this.iterator.next(): null;
+ return hasNext;
+ }
+ };
+ }
+
+ /**
+ * @param cellArray
+ * @return CellScanner interface over cellArray
+ */
+ public static CellScanner createCellScanner(final Cell[] cellArray) {
+ return new CellScanner() {
+ private final Cell [] cells = cellArray;
+ private int index = -1;
+
+ @Override
+ public Cell current() {
+ return (index < 0)? null: this.cells[index];
+ }
+
+ @Override
+ public boolean advance() {
+ return ++index < this.cells.length;
+ }
+ };
+ }
+
+ /**
+ * Flatten the map of cells out under the CellScanner
+ * @param map Map of Cell Lists; for example, the map of families to Cells that is used
+ * inside Put, etc., keeping Cells organized by family.
+ * @return CellScanner interface over the map's Cells
+ */
+ public static CellScanner createCellScanner(final NavigableMap<byte [], List<Cell>> map) {
+ return new CellScanner() {
+ private final Iterator<Entry<byte [], List<Cell>>> entries = map.entrySet().iterator();
+ private Iterator<Cell> currentIterator = null;
+ private Cell currentCell;
+
+ @Override
+ public Cell current() {
+ return this.currentCell;
+ }
+
+ @Override
+ public boolean advance() {
+ if (this.currentIterator == null) {
+ if (!this.entries.hasNext()) return false;
+ this.currentIterator = this.entries.next().getValue().iterator();
+ }
+ if (this.currentIterator.hasNext()) {
+ this.currentCell = this.currentIterator.next();
+ return true;
+ }
+ this.currentCell = null;
+ this.currentIterator = null;
+ return advance();
+ }
+ };
+ }
+}
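
A usage sketch of the NavigableMap overload, the shape in which Put keeps Cells organized by family (class and variable names are illustrative):

    import java.util.Arrays;
    import java.util.List;
    import java.util.NavigableMap;
    import java.util.TreeMap;
    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.util.Bytes;
    import org.apache.hbase.Cell;
    import org.apache.hbase.CellScanner;
    import org.apache.hbase.CellUtil;

    public class FamilyMapScan {
      public static void main(String[] args) {
        NavigableMap<byte[], List<Cell>> map = new TreeMap<byte[], List<Cell>>(Bytes.BYTES_COMPARATOR);
        map.put(Bytes.toBytes("f1"), Arrays.<Cell>asList(
            new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f1"), Bytes.toBytes("q"), Bytes.toBytes("v"))));
        CellScanner scanner = CellUtil.createCellScanner(map);
        while (scanner.advance()) {
          Cell cell = scanner.current();
          System.out.println(Bytes.toString(CellUtil.getValueArray(cell)));
        }
      }
    }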
diff --git a/hbase-common/src/main/java/org/apache/hbase/cell/CellComparator.java b/hbase-common/src/main/java/org/apache/hbase/cell/CellComparator.java
deleted file mode 100644
index ce9d063..0000000
--- a/hbase-common/src/main/java/org/apache/hbase/cell/CellComparator.java
+++ /dev/null
@@ -1,195 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hbase.cell;
-
-import java.io.Serializable;
-import java.util.Comparator;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hbase.Cell;
-
-import com.google.common.primitives.Longs;
-
-/**
- * Compare two traditional HBase cells.
- *
- * Note: This comparator is not valid for -ROOT- and .META. tables.
- */
-@InterfaceAudience.Private
-@InterfaceStability.Evolving
-public class CellComparator implements Comparator<Cell>, Serializable {
- private static final long serialVersionUID = -8760041766259623329L;
-
- @Override
- public int compare(Cell a, Cell b) {
- return compareStatic(a, b);
- }
-
-
- public static int compareStatic(Cell a, Cell b) {
- //row
- int c = Bytes.compareTo(
- a.getRowArray(), a.getRowOffset(), a.getRowLength(),
- b.getRowArray(), b.getRowOffset(), b.getRowLength());
- if (c != 0) return c;
-
- //family
- c = Bytes.compareTo(
- a.getFamilyArray(), a.getFamilyOffset(), a.getFamilyLength(),
- b.getFamilyArray(), b.getFamilyOffset(), b.getFamilyLength());
- if (c != 0) return c;
-
- //qualifier
- c = Bytes.compareTo(
- a.getQualifierArray(), a.getQualifierOffset(), a.getQualifierLength(),
- b.getQualifierArray(), b.getQualifierOffset(), b.getQualifierLength());
- if (c != 0) return c;
-
- //timestamp: later sorts first
- c = -Longs.compare(a.getTimestamp(), b.getTimestamp());
- if (c != 0) return c;
-
- //type
- c = (0xff & a.getTypeByte()) - (0xff & b.getTypeByte());
- if (c != 0) return c;
-
- //mvccVersion: later sorts first
- return -Longs.compare(a.getMvccVersion(), b.getMvccVersion());
- }
-
-
- /**************** equals ****************************/
-
- public static boolean equals(Cell a, Cell b){
- return equalsRow(a, b)
- && equalsFamily(a, b)
- && equalsQualifier(a, b)
- && equalsTimestamp(a, b)
- && equalsType(a, b);
- }
-
- public static boolean equalsRow(Cell a, Cell b){
- return Bytes.equals(
- a.getRowArray(), a.getRowOffset(), a.getRowLength(),
- b.getRowArray(), b.getRowOffset(), b.getRowLength());
- }
-
- public static boolean equalsFamily(Cell a, Cell b){
- return Bytes.equals(
- a.getFamilyArray(), a.getFamilyOffset(), a.getFamilyLength(),
- b.getFamilyArray(), b.getFamilyOffset(), b.getFamilyLength());
- }
-
- public static boolean equalsQualifier(Cell a, Cell b){
- return Bytes.equals(
- a.getQualifierArray(), a.getQualifierOffset(), a.getQualifierLength(),
- b.getQualifierArray(), b.getQualifierOffset(), b.getQualifierLength());
- }
-
- public static boolean equalsTimestamp(Cell a, Cell b){
- return a.getTimestamp() == b.getTimestamp();
- }
-
- public static boolean equalsType(Cell a, Cell b){
- return a.getTypeByte() == b.getTypeByte();
- }
-
-
- /********************* hashCode ************************/
-
- /**
- * Returns a hash code that is always the same for two Cells having a matching equals(..) result.
- * Currently does not guard against nulls, but it could if necessary.
- */
- public static int hashCode(Cell cell){
- if (cell == null) {// return 0 for empty Cell
- return 0;
- }
-
- //pre-calculate the 3 hashes made of byte ranges
- int rowHash = Bytes.hashCode(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength());
- int familyHash = Bytes.hashCode(cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength());
- int qualifierHash = Bytes.hashCode(cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength());
-
- //combine the 6 sub-hashes
- int hash = 31 * rowHash + familyHash;
- hash = 31 * hash + qualifierHash;
- hash = 31 * hash + (int)cell.getTimestamp();
- hash = 31 * hash + cell.getTypeByte();
- hash = 31 * hash + (int)cell.getMvccVersion();
- return hash;
- }
-
-
- /******************** lengths *************************/
-
- public static boolean areKeyLengthsEqual(Cell a, Cell b) {
- return a.getRowLength() == b.getRowLength()
- && a.getFamilyLength() == b.getFamilyLength()
- && a.getQualifierLength() == b.getQualifierLength();
- }
-
- public static boolean areRowLengthsEqual(Cell a, Cell b) {
- return a.getRowLength() == b.getRowLength();
- }
-
-
- /***************** special cases ****************************/
-
- /**
- * special case for KeyValue.equals
- */
- private static int compareStaticIgnoreMvccVersion(Cell a, Cell b) {
- //row
- int c = Bytes.compareTo(
- a.getRowArray(), a.getRowOffset(), a.getRowLength(),
- b.getRowArray(), b.getRowOffset(), b.getRowLength());
- if (c != 0) return c;
-
- //family
- c = Bytes.compareTo(
- a.getFamilyArray(), a.getFamilyOffset(), a.getFamilyLength(),
- b.getFamilyArray(), b.getFamilyOffset(), b.getFamilyLength());
- if (c != 0) return c;
-
- //qualifier
- c = Bytes.compareTo(
- a.getQualifierArray(), a.getQualifierOffset(), a.getQualifierLength(),
- b.getQualifierArray(), b.getQualifierOffset(), b.getQualifierLength());
- if (c != 0) return c;
-
- //timestamp: later sorts first
- c = -Longs.compare(a.getTimestamp(), b.getTimestamp());
- if (c != 0) return c;
-
- //type
- c = (0xff & a.getTypeByte()) - (0xff & b.getTypeByte());
- return c;
- }
-
- /**
- * special case for KeyValue.equals
- */
- public static boolean equalsIgnoreMvccVersion(Cell a, Cell b){
- return 0 == compareStaticIgnoreMvccVersion(a, b);
- }
-
-}
diff --git a/hbase-common/src/main/java/org/apache/hbase/cell/CellOutputStream.java b/hbase-common/src/main/java/org/apache/hbase/cell/CellOutputStream.java
deleted file mode 100644
index 6d46ec5..0000000
--- a/hbase-common/src/main/java/org/apache/hbase/cell/CellOutputStream.java
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hbase.cell;
-
-import java.io.IOException;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hbase.Cell;
-
-/**
- * Accepts a stream of Cells and adds them to its internal data structure. This can be used to build
- * a block of cells during compactions and flushes, or to build a byte[] to send to the client. This
- * could be backed by a List, but more efficient implementations will append results to a
- * byte[] to eliminate overhead, and possibly encode the cells further.
- */
-@InterfaceAudience.Private
-@InterfaceStability.Evolving
-public interface CellOutputStream {
-
- /**
- * Implementation must copy the entire state of the Cell. If the appended Cell is modified
- * immediately after the append method returns, the modifications must have absolutely no effect
- * on the copy of the Cell that was added to the appender. For example, calling someList.add(cell)
- * is not correct.
- */
- void write(Cell cell);
-
- /**
- * Let the implementation decide what to do. Usually means writing accumulated data into a byte[]
- * that can then be read from the implementation to be sent to disk, put in the block cache, or
- * sent over the network.
- */
- void flush() throws IOException;
-
-}
diff --git a/hbase-common/src/main/java/org/apache/hbase/cell/CellScannerPosition.java b/hbase-common/src/main/java/org/apache/hbase/cell/CellScannerPosition.java
deleted file mode 100644
index eeadf5f..0000000
--- a/hbase-common/src/main/java/org/apache/hbase/cell/CellScannerPosition.java
+++ /dev/null
@@ -1,68 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hbase.cell;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-
-/**
- * An indicator of the state of the scanner after an operation such as nextCell() or positionAt(..).
- * For example:
- *
- * - In a DataBlockScanner, the AFTER_LAST position indicates to the parent StoreFileScanner that
- * it should load the next block.
- * - In a StoreFileScanner, the AFTER_LAST position indicates that the file has been exhausted.
- * - In a RegionScanner, the AFTER_LAST position indicates that the scanner should move to the
- * next region.
- *
- */
-@InterfaceAudience.Public
-@InterfaceStability.Evolving
-public enum CellScannerPosition {
-
- /**
- * getCurrentCell() will NOT return a valid cell. Calling nextCell() will advance to the first
- * cell.
- */
- BEFORE_FIRST,
-
- /**
- * getCurrentCell() will return a valid cell, but it is not the cell requested by positionAt(..),
- * rather it is the nearest cell before the requested cell.
- */
- BEFORE,
-
- /**
- * getCurrentCell() will return a valid cell, and it is exactly the cell that was requested by
- * positionAt(..).
- */
- AT,
-
- /**
- * getCurrentCell() will return a valid cell, but it is not the cell requested by positionAt(..),
- * rather it is the nearest cell after the requested cell.
- */
- AFTER,
-
- /**
- * getCurrentCell() will NOT return a valid cell. Calling nextCell() will have no effect.
- */
- AFTER_LAST
-
-}
diff --git a/hbase-common/src/main/java/org/apache/hbase/cell/CellTool.java b/hbase-common/src/main/java/org/apache/hbase/cell/CellTool.java
deleted file mode 100644
index 229ca36..0000000
--- a/hbase-common/src/main/java/org/apache/hbase/cell/CellTool.java
+++ /dev/null
@@ -1,118 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hbase.cell;
-
-import java.nio.ByteBuffer;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.hbase.util.ByteRange;
-import org.apache.hbase.Cell;
-
-@InterfaceAudience.Private
-@InterfaceStability.Evolving
-public final class CellTool {
-
- /******************* ByteRange *******************************/
-
- public static ByteRange fillRowRange(Cell cell, ByteRange range) {
- return range.set(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength());
- }
-
- public static ByteRange fillFamilyRange(Cell cell, ByteRange range) {
- return range.set(cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength());
- }
-
- public static ByteRange fillQualifierRange(Cell cell, ByteRange range) {
- return range.set(cell.getQualifierArray(), cell.getQualifierOffset(),
- cell.getQualifierLength());
- }
-
-
- /***************** get individual arrays for tests ************/
-
- public static byte[] getRowArray(Cell cell){
- byte[] output = new byte[cell.getRowLength()];
- copyRowTo(cell, output, 0);
- return output;
- }
-
- public static byte[] getFamilyArray(Cell cell){
- byte[] output = new byte[cell.getFamilyLength()];
- copyFamilyTo(cell, output, 0);
- return output;
- }
-
- public static byte[] getQualifierArray(Cell cell){
- byte[] output = new byte[cell.getQualifierLength()];
- copyQualifierTo(cell, output, 0);
- return output;
- }
-
- public static byte[] getValueArray(Cell cell){
- byte[] output = new byte[cell.getValueLength()];
- copyValueTo(cell, output, 0);
- return output;
- }
-
-
- /******************** copyTo **********************************/
-
- public static int copyRowTo(Cell cell, byte[] destination, int destinationOffset) {
- System.arraycopy(cell.getRowArray(), cell.getRowOffset(), destination, destinationOffset,
- cell.getRowLength());
- return destinationOffset + cell.getRowLength();
- }
-
- public static int copyFamilyTo(Cell cell, byte[] destination, int destinationOffset) {
- System.arraycopy(cell.getFamilyArray(), cell.getFamilyOffset(), destination, destinationOffset,
- cell.getFamilyLength());
- return destinationOffset + cell.getFamilyLength();
- }
-
- public static int copyQualifierTo(Cell cell, byte[] destination, int destinationOffset) {
- System.arraycopy(cell.getQualifierArray(), cell.getQualifierOffset(), destination,
- destinationOffset, cell.getQualifierLength());
- return destinationOffset + cell.getQualifierLength();
- }
-
- public static int copyValueTo(Cell cell, byte[] destination, int destinationOffset) {
- System.arraycopy(cell.getValueArray(), cell.getValueOffset(), destination, destinationOffset,
- cell.getValueLength());
- return destinationOffset + cell.getValueLength();
- }
-
-
- /********************* misc *************************************/
-
- public static byte getRowByte(Cell cell, int index) {
- return cell.getRowArray()[cell.getRowOffset() + index];
- }
-
-
- /********************** KeyValue (move to KeyValueUtils) *********************/
-
- public static ByteBuffer getValueBufferShallowCopy(Cell cell) {
- ByteBuffer buffer = ByteBuffer.wrap(cell.getValueArray(), cell.getValueOffset(),
- cell.getValueLength());
-// buffer.position(buffer.limit());//make it look as if value was appended
- return buffer;
- }
-
-}
diff --git a/hbase-common/src/main/java/org/apache/hbase/codec/BaseDecoder.java b/hbase-common/src/main/java/org/apache/hbase/codec/BaseDecoder.java
new file mode 100644
index 0000000..0c941df
--- /dev/null
+++ b/hbase-common/src/main/java/org/apache/hbase/codec/BaseDecoder.java
@@ -0,0 +1,59 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hbase.codec;
+
+import java.io.IOException;
+import java.io.InputStream;
+
+import org.apache.hbase.Cell;
+
+abstract class BaseDecoder implements Codec.Decoder {
+ final InputStream in;
+ private boolean hasNext = true;
+ private Cell current = null;
+
+ BaseDecoder(final InputStream in) {
+ this.in = in;
+ }
+
+ @Override
+ public boolean advance() {
+ if (!this.hasNext) return this.hasNext;
+ try {
+ if (this.in.available() <= 0) {
+ this.hasNext = false;
+ return this.hasNext;
+ }
+ this.current = parseCell();
+ } catch (IOException e) {
+ throw new RuntimeException(e);
+ }
+ return this.hasNext;
+ }
+
+ /**
+   * @return the Cell extracted from the stream
+ * @throws IOException
+ */
+ abstract Cell parseCell() throws IOException;
+
+ @Override
+ public Cell current() {
+ return this.current;
+ }
+}
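
BaseDecoder is a template: advance() handles end-of-stream detection via InputStream#available() and error wrapping, leaving only the per-cell parse to subclasses. A minimal hypothetical subclass for illustration (the name LengthPrefixedDecoder is invented; a real subclass must live in the org.apache.hbase.codec package because BaseDecoder is package-private):

    package org.apache.hbase.codec;

    import java.io.IOException;
    import java.io.InputStream;

    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hbase.Cell;

    // Hypothetical decoder: one length-prefixed KeyValue per advance() call.
    class LengthPrefixedDecoder extends BaseDecoder {
      LengthPrefixedDecoder(final InputStream in) {
        super(in);
      }

      @Override
      Cell parseCell() throws IOException {
        // Only invoked while in.available() > 0; see BaseDecoder.advance().
        return KeyValue.iscreate(this.in);
      }
    }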
diff --git a/hbase-common/src/main/java/org/apache/hbase/codec/BaseEncoder.java b/hbase-common/src/main/java/org/apache/hbase/codec/BaseEncoder.java
new file mode 100644
index 0000000..8a2bb2e
--- /dev/null
+++ b/hbase-common/src/main/java/org/apache/hbase/codec/BaseEncoder.java
@@ -0,0 +1,51 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hbase.codec;
+
+import java.io.IOException;
+import java.io.OutputStream;
+
+import org.apache.hbase.Cell;
+
+abstract class BaseEncoder implements Codec.Encoder {
+ protected final OutputStream out;
+ // This encoder is 'done' once flush has been called.
+ protected boolean flushed = false;
+
+ public BaseEncoder(final OutputStream out) {
+ this.out = out;
+ }
+
+ @Override
+ public abstract void write(Cell cell) throws IOException;
+
+ void checkFlushed() throws CodecException {
+ if (this.flushed) throw new CodecException("Flushed; done");
+ }
+
+ @Override
+ public void flush() throws IOException {
+ if (this.flushed) return;
+ this.flushed = true;
+ try {
+ this.out.flush();
+ } catch (IOException e) {
+ throw new CodecException(e);
+ }
+ }
+}
\ No newline at end of file
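
The flushed flag gives every encoder a strict write-then-flush lifecycle: checkFlushed() rejects writes once flush() has run, and flush() itself is idempotent. A sketch of the expected call pattern, assuming the CellCodec added below (EncoderLifecycleExample itself is illustrative only):

    import java.io.ByteArrayOutputStream;
    import java.io.IOException;

    import org.apache.hbase.Cell;
    import org.apache.hbase.codec.CellCodec;
    import org.apache.hbase.codec.Codec;

    public class EncoderLifecycleExample {
      // Encode a single Cell; writing after flush() would throw CodecException.
      public static byte [] encodeOne(final Cell cell) throws IOException {
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        Codec.Encoder encoder = new CellCodec().getEncoder(baos);
        encoder.write(cell);
        encoder.flush(); // marks this encoder 'done'; further flushes are no-ops
        return baos.toByteArray();
      }
    }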
diff --git a/hbase-common/src/main/java/org/apache/hbase/codec/CellCodec.java b/hbase-common/src/main/java/org/apache/hbase/codec/CellCodec.java
new file mode 100644
index 0000000..8fa4f8c
--- /dev/null
+++ b/hbase-common/src/main/java/org/apache/hbase/codec/CellCodec.java
@@ -0,0 +1,115 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hbase.codec;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+
+import org.apache.commons.io.IOUtils;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hbase.Cell;
+import org.apache.hbase.CellUtil;
+
+/**
+ * Basic Cell codec that just writes out all the individual elements of a Cell. Uses ints
+ * to delimit all lengths. Profligate; needs tuning. Does not write the mvcc stamp.
+ * Use a different codec if you want that in the stream.
+ */
+public class CellCodec implements Codec {
+ class CellEncoder extends BaseEncoder {
+ CellEncoder(final OutputStream out) {
+ super(out);
+ }
+
+ @Override
+ public void write(Cell cell) throws IOException {
+ checkFlushed();
+ try {
+ // Row
+ write(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength());
+ // Column family
+ write(cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength());
+ // Qualifier
+ write(cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength());
+ // Version
+ this.out.write(Bytes.toBytes(cell.getTimestamp()));
+ // Type
+ this.out.write(cell.getTypeByte());
+ // Value
+ write(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
+ } catch (IOException e) {
+ throw new CodecException(e);
+ }
+ }
+
+ /**
+ * Write int length followed by array bytes.
+ * @param bytes
+ * @param offset
+ * @param length
+ * @throws IOException
+ */
+ private void write(final byte [] bytes, final int offset, final int length)
+ throws IOException {
+ this.out.write(Bytes.toBytes(length));
+ this.out.write(bytes, offset, length);
+ }
+ }
+
+ class CellDecoder extends BaseDecoder {
+ public CellDecoder(final InputStream in) {
+ super(in);
+ }
+
+    @Override
+    Cell parseCell() throws IOException {
+ byte [] row = readByteArray(this.in);
+ byte [] family = readByteArray(in);
+ byte [] qualifier = readByteArray(in);
+ byte [] longArray = new byte[Bytes.SIZEOF_LONG];
+ IOUtils.readFully(this.in, longArray);
+ long timestamp = Bytes.toLong(longArray);
+ byte type = (byte) this.in.read();
+ byte [] value = readByteArray(in);
+ return CellUtil.createCell(row, family, qualifier, timestamp, type, value);
+ }
+
+ /**
+ * @return Byte array read from the stream.
+ * @throws IOException
+ */
+ private byte [] readByteArray(final InputStream in) throws IOException {
+ byte [] intArray = new byte[Bytes.SIZEOF_INT];
+      // Use readFully: a bare read() may return fewer than SIZEOF_INT bytes.
+      IOUtils.readFully(in, intArray);
+ int length = Bytes.toInt(intArray);
+ byte [] bytes = new byte [length];
+ IOUtils.readFully(in, bytes);
+ return bytes;
+ }
+ }
+
+ @Override
+ public Decoder getDecoder(InputStream is) {
+ return new CellDecoder(is);
+ }
+
+ @Override
+ public Encoder getEncoder(OutputStream os) {
+ return new CellEncoder(os);
+ }
+}
\ No newline at end of file
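
For one cell, CellCodec therefore lays the stream out as: int row length, row bytes, int family length, family bytes, int qualifier length, qualifier bytes, 8-byte timestamp, 1 type byte, int value length, value bytes. A hedged sketch that checks that arithmetic against the encoder (CellCodecLayoutExample is hypothetical):

    import java.io.ByteArrayOutputStream;
    import java.io.IOException;

    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.util.Bytes;
    import org.apache.hbase.codec.CellCodec;
    import org.apache.hbase.codec.Codec;

    public class CellCodecLayoutExample {
      public static void main(String [] args) throws IOException {
        KeyValue kv = new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"),
          Bytes.toBytes("q"), Bytes.toBytes("v"));
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        Codec.Encoder encoder = new CellCodec().getEncoder(baos);
        encoder.write(kv);
        encoder.flush();
        int expected = 4 * Bytes.SIZEOF_INT // row, family, qualifier, value prefixes
            + kv.getRowLength() + kv.getFamilyLength() + kv.getQualifierLength()
            + kv.getValueLength()
            + Bytes.SIZEOF_LONG // timestamp
            + 1;                // type byte
        System.out.println(baos.size() + " bytes written, expected " + expected);
      }
    }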
diff --git a/hbase-common/src/main/java/org/apache/hbase/codec/Codec.java b/hbase-common/src/main/java/org/apache/hbase/codec/Codec.java
new file mode 100644
index 0000000..6e15363
--- /dev/null
+++ b/hbase-common/src/main/java/org/apache/hbase/codec/Codec.java
@@ -0,0 +1,50 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hbase.codec;
+
+import java.io.InputStream;
+import java.io.OutputStream;
+
+import org.apache.hadoop.hbase.io.encoding.DataBlockEncoder;
+import org.apache.hbase.CellScanner;
+import org.apache.hbase.io.CellOutputStream;
+
+/**
+ * Encoder/Decoder for Cell.
+ *
+ * Like {@link DataBlockEncoder}, only Cell-based rather than KeyValue version 1 based,
+ * and without presuming an hfile context. The intent is an interface that will work for
+ * both hfile and rpc.
+ */
+public interface Codec {
+ // TODO: interfacing with {@link DataBlockEncoder}
+ /**
+ * Call flush when done. Some encoders may not put anything on the stream until flush is called.
+ * On flush, let go of any resources used by the encoder.
+ */
+ public interface Encoder extends CellOutputStream {}
+
+ /**
+   * Implementations should implicitly clean up any resources allocated when the
+   * Decoder/CellScanner runs off the end of the cell block, rather than requiring the user
+   * to call close explicitly.
+   */
+  public interface Decoder extends CellScanner {}
+
+ Decoder getDecoder(InputStream is);
+ Encoder getEncoder(OutputStream os);
+}
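
Because Encoder and Decoder are just CellOutputStream and CellScanner, callers can round-trip cells against the interface without knowing the concrete wire format. A hypothetical helper sketch (CodecRoundTrip is invented for illustration):

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.List;

    import org.apache.hbase.Cell;
    import org.apache.hbase.codec.Codec;

    public class CodecRoundTrip {
      static List<Cell> roundTrip(final Codec codec, final List<Cell> cells)
      throws IOException {
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        Codec.Encoder encoder = codec.getEncoder(baos);
        for (Cell cell : cells) {
          encoder.write(cell);
        }
        encoder.flush();
        Codec.Decoder decoder =
          codec.getDecoder(new ByteArrayInputStream(baos.toByteArray()));
        List<Cell> result = new ArrayList<Cell>();
        while (decoder.advance()) {
          // Accumulating is safe here because these decoders create a fresh Cell
          // per advance(); a scanner that reuses a mutable Cell would need copies.
          result.add(decoder.current());
        }
        return result;
      }
    }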
diff --git a/hbase-common/src/main/java/org/apache/hbase/codec/CodecException.java b/hbase-common/src/main/java/org/apache/hbase/codec/CodecException.java
new file mode 100644
index 0000000..352e1a6
--- /dev/null
+++ b/hbase-common/src/main/java/org/apache/hbase/codec/CodecException.java
@@ -0,0 +1,39 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hbase.codec;
+
+import java.io.IOException;
+
+public class CodecException extends IOException {
+ private static final long serialVersionUID = -2850095011686914405L;
+
+ public CodecException() {
+ }
+
+ public CodecException(String message) {
+ super(message);
+ }
+
+ public CodecException(Throwable t) {
+ super(t);
+ }
+
+ public CodecException(String message, Throwable t) {
+ super(message, t);
+ }
+}
\ No newline at end of file
diff --git a/hbase-common/src/main/java/org/apache/hbase/codec/KeyValueCodec.java b/hbase-common/src/main/java/org/apache/hbase/codec/KeyValueCodec.java
new file mode 100644
index 0000000..4cb648e
--- /dev/null
+++ b/hbase-common/src/main/java/org/apache/hbase/codec/KeyValueCodec.java
@@ -0,0 +1,86 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hbase.codec;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.KeyValueUtil;
+import org.apache.hbase.Cell;
+
+/**
+ * Codec that does KeyValue version 1 serialization.
+ *
+ * Encodes by casting Cell to KeyValue and writing out the backing array with a length prefix.
+ * This is how KVs were serialized in Puts, Deletes and Results pre-0.96. It's what would
+ * happen if you called the Writable#write KeyValue implementation. This encoder will fail
+ * if the passed Cell is not an old-school pre-0.96 KeyValue. Does not copy bytes when
+ * writing; it writes them directly to the passed stream.
+ *
+ * If you wrote two KeyValues to this encoder, it would look like this in the stream:
+ *
+ * length-of-KeyValue1 // A java int with the length of KeyValue1 backing array
+ * KeyValue1 backing array filled with a KeyValue serialized in its particular format
+ * length-of-KeyValue2
+ * KeyValue2 backing array
+ *
+ */
+public class KeyValueCodec implements Codec {
+ class KeyValueEncoder extends BaseEncoder {
+ KeyValueEncoder(final OutputStream out) {
+ super(out);
+ }
+
+ @Override
+ public void write(Cell cell) throws IOException {
+ checkFlushed();
+ // This is crass and will not work when KV changes. Also if passed a non-kv Cell, it will
+      // make an expensive copy.
+ try {
+ KeyValue.oswrite((KeyValue)KeyValueUtil.ensureKeyValue(cell), this.out);
+ } catch (IOException e) {
+ throw new CodecException(e);
+ }
+ }
+ }
+
+ class KeyValueDecoder extends BaseDecoder {
+ KeyValueDecoder(final InputStream in) {
+ super(in);
+ }
+
+    @Override
+    Cell parseCell() throws IOException {
+ return KeyValue.iscreate(in);
+ }
+ }
+
+ /**
+   * Implementation depends on {@link InputStream#available()}: a stream reporting zero
+   * available bytes is treated as exhausted.
+ */
+ @Override
+ public Decoder getDecoder(final InputStream is) {
+ return new KeyValueDecoder(is);
+ }
+
+ @Override
+ public Encoder getEncoder(OutputStream os) {
+ return new KeyValueEncoder(os);
+ }
+}
\ No newline at end of file
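
A hedged round-trip sketch of the layout described above (KeyValueCodecExample is hypothetical; the four-byte prefix matches what KeyValue.oswrite puts on the stream):

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.IOException;

    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.util.Bytes;
    import org.apache.hbase.codec.Codec;
    import org.apache.hbase.codec.KeyValueCodec;

    public class KeyValueCodecExample {
      public static void main(String [] args) throws IOException {
        KeyValue kv = new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"),
          Bytes.toBytes("q"), Bytes.toBytes("v"));
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        Codec.Encoder encoder = new KeyValueCodec().getEncoder(baos);
        encoder.write(kv);
        encoder.flush();
        byte [] wire = baos.toByteArray();
        // First four bytes are the big-endian length of the KeyValue backing array.
        System.out.println("prefix=" + Bytes.toInt(wire, 0)
            + " kvLength=" + kv.getLength());
        Codec.Decoder decoder =
          new KeyValueCodec().getDecoder(new ByteArrayInputStream(wire));
        while (decoder.advance()) {
          System.out.println(decoder.current());
        }
      }
    }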
diff --git a/hbase-common/src/main/java/org/apache/hbase/io/CellOutputStream.java b/hbase-common/src/main/java/org/apache/hbase/io/CellOutputStream.java
new file mode 100644
index 0000000..f832e8d
--- /dev/null
+++ b/hbase-common/src/main/java/org/apache/hbase/io/CellOutputStream.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hbase.io;
+
+import java.io.IOException;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hbase.Cell;
+import org.apache.hbase.CellScanner;
+
+/**
+ * Accepts a stream of Cells. This can be used to build a block of cells during compactions
+ * and flushes, or to build a byte[] to send to the client. This could be backed by a
+ * List<Cell>, but more efficient implementations will append results to a
+ * byte[] to eliminate overhead, and possibly encode the cells further.
+ * To read Cells, use {@link CellScanner}
+ * @see CellScanner
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public interface CellOutputStream {
+ /**
+ * Implementation must copy the entire state of the Cell. If the written Cell is modified
+ * immediately after the write method returns, the modifications must have absolutely no effect
+ * on the copy of the Cell that was added in the write.
+ * @param cell Cell to write out
+ * @throws IOException
+ */
+ void write(Cell cell) throws IOException;
+
+ /**
+ * Let the implementation decide what to do. Usually means writing accumulated data into a byte[]
+ * that can then be read from the implementation to be sent to disk, put in the block cache, or
+ * sent over the network.
+ * @throws IOException
+ */
+ void flush() throws IOException;
+}
\ No newline at end of file
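
The copy requirement in write(Cell) is what lets callers reuse a mutable Cell across writes. A hypothetical list-backed implementation illustrating the contract (CopyingCellOutputStream is invented; it leans on KeyValueUtil.copyToNewKeyValue to deep-copy):

    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.List;

    import org.apache.hadoop.hbase.KeyValueUtil;
    import org.apache.hbase.Cell;
    import org.apache.hbase.io.CellOutputStream;

    class CopyingCellOutputStream implements CellOutputStream {
      private final List<Cell> cells = new ArrayList<Cell>();

      @Override
      public void write(Cell cell) throws IOException {
        // Deep-copy so later mutation of the caller's Cell cannot leak in.
        cells.add(KeyValueUtil.copyToNewKeyValue(cell));
      }

      @Override
      public void flush() throws IOException {
        // Everything already lives in 'cells'; nothing buffered to push out.
      }
    }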
diff --git a/hbase-common/src/test/java/org/apache/hbase/TestCellUtil.java b/hbase-common/src/test/java/org/apache/hbase/TestCellUtil.java
new file mode 100644
index 0000000..930c7d1
--- /dev/null
+++ b/hbase-common/src/test/java/org/apache/hbase/TestCellUtil.java
@@ -0,0 +1,72 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hbase;
+
+import static org.junit.Assert.assertEquals;
+
+import java.util.Arrays;
+import java.util.List;
+import java.util.NavigableMap;
+import java.util.TreeMap;
+
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.Test;
+
+public class TestCellUtil {
+ @Test
+ public void testCreateCellScannerCellList() {
+ final int count = 3;
+ Cell [] cs = getCells(count, Bytes.toBytes(0));
+    List<Cell> cells = Arrays.asList(cs);
+ CellScanner scanner = CellUtil.createCellScanner(cells);
+ int i = 0;
+ while (scanner.advance()) {
+ i++;
+ }
+ assertEquals(count, i);
+ }
+
+ @Test
+ public void testCreateCellScannerFamilyMap() {
+ final int count = 3;
+    final NavigableMap<byte [], List<Cell>> map =
+      new TreeMap<byte [], List<Cell>>(Bytes.BYTES_COMPARATOR);
+ for (int i = 0; i < count; i++) {
+ byte [] key = Bytes.toBytes(i);
+ Cell [] cs = getCells(count, key);
+ map.put(key, Arrays.asList(cs));
+ }
+ CellScanner scanner = CellUtil.createCellScanner(map);
+ int i = 0;
+ while (scanner.advance()) {
+ i++;
+ }
+ assertEquals(count * count, i);
+ }
+
+ static Cell [] getCells(final int howMany, final byte [] family) {
+ Cell [] cells = new Cell[howMany];
+ for (int i = 0; i < howMany; i++) {
+ byte [] index = Bytes.toBytes(i);
+ KeyValue kv = new KeyValue(index, family, index, index);
+ cells[i] = kv;
+ }
+ return cells;
+ }
+}
\ No newline at end of file
diff --git a/hbase-common/src/test/java/org/apache/hbase/codec/TestCellCodec.java b/hbase-common/src/test/java/org/apache/hbase/codec/TestCellCodec.java
new file mode 100644
index 0000000..2ebb835
--- /dev/null
+++ b/hbase-common/src/test/java/org/apache/hbase/codec/TestCellCodec.java
@@ -0,0 +1,114 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hbase.codec;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+import java.io.IOException;
+
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.SmallTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hbase.Cell;
+import org.apache.hbase.CellComparator;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import com.google.common.io.CountingInputStream;
+import com.google.common.io.CountingOutputStream;
+
+@Category(SmallTests.class)
+public class TestCellCodec {
+
+ @Test
+ public void testEmptyWorks() throws IOException {
+ ByteArrayOutputStream baos = new ByteArrayOutputStream();
+ CountingOutputStream cos = new CountingOutputStream(baos);
+ DataOutputStream dos = new DataOutputStream(cos);
+ Codec codec = new CellCodec();
+ Codec.Encoder encoder = codec.getEncoder(dos);
+ encoder.flush();
+ dos.close();
+ long offset = cos.getCount();
+ assertEquals(0, offset);
+ CountingInputStream cis = new CountingInputStream(new ByteArrayInputStream(baos.toByteArray()));
+ DataInputStream dis = new DataInputStream(cis);
+ Codec.Decoder decoder = codec.getDecoder(dis);
+ assertFalse(decoder.advance());
+ dis.close();
+ assertEquals(0, cis.getCount());
+ }
+
+ @Test
+ public void testOne() throws IOException {
+ ByteArrayOutputStream baos = new ByteArrayOutputStream();
+ CountingOutputStream cos = new CountingOutputStream(baos);
+ DataOutputStream dos = new DataOutputStream(cos);
+ Codec codec = new CellCodec();
+ Codec.Encoder encoder = codec.getEncoder(dos);
+    final KeyValue kv = new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"),
+      Bytes.toBytes("q"), Bytes.toBytes("v"));
+ encoder.write(kv);
+ encoder.flush();
+ dos.close();
+ long offset = cos.getCount();
+ CountingInputStream cis = new CountingInputStream(new ByteArrayInputStream(baos.toByteArray()));
+ DataInputStream dis = new DataInputStream(cis);
+ Codec.Decoder decoder = codec.getDecoder(dis);
+ assertTrue(decoder.advance()); // First read should pull in the KV
+ assertFalse(decoder.advance()); // Second read should trip over the end-of-stream marker and return false
+ dis.close();
+ assertEquals(offset, cis.getCount());
+ }
+
+ @Test
+ public void testThree() throws IOException {
+ ByteArrayOutputStream baos = new ByteArrayOutputStream();
+ CountingOutputStream cos = new CountingOutputStream(baos);
+ DataOutputStream dos = new DataOutputStream(cos);
+ Codec codec = new CellCodec();
+ Codec.Encoder encoder = codec.getEncoder(dos);
+    final KeyValue kv1 = new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"),
+      Bytes.toBytes("1"), Bytes.toBytes("1"));
+    final KeyValue kv2 = new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"),
+      Bytes.toBytes("2"), Bytes.toBytes("2"));
+    final KeyValue kv3 = new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"),
+      Bytes.toBytes("3"), Bytes.toBytes("3"));
+ encoder.write(kv1);
+ encoder.write(kv2);
+ encoder.write(kv3);
+ encoder.flush();
+ dos.close();
+ long offset = cos.getCount();
+ CountingInputStream cis = new CountingInputStream(new ByteArrayInputStream(baos.toByteArray()));
+ DataInputStream dis = new DataInputStream(cis);
+ Codec.Decoder decoder = codec.getDecoder(dis);
+ assertTrue(decoder.advance());
+ Cell c = decoder.current();
+ assertTrue(CellComparator.equals(c, kv1));
+ assertTrue(decoder.advance());
+ c = decoder.current();
+ assertTrue(CellComparator.equals(c, kv2));
+ assertTrue(decoder.advance());
+ c = decoder.current();
+ assertTrue(CellComparator.equals(c, kv3));
+ assertFalse(decoder.advance());
+ dis.close();
+ assertEquals(offset, cis.getCount());
+ }
+}
\ No newline at end of file
diff --git a/hbase-common/src/test/java/org/apache/hbase/codec/TestKeyValueCodec.java b/hbase-common/src/test/java/org/apache/hbase/codec/TestKeyValueCodec.java
new file mode 100644
index 0000000..9f2428f
--- /dev/null
+++ b/hbase-common/src/test/java/org/apache/hbase/codec/TestKeyValueCodec.java
@@ -0,0 +1,117 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hbase.codec;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+import java.io.IOException;
+
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.SmallTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import com.google.common.io.CountingInputStream;
+import com.google.common.io.CountingOutputStream;
+
+@Category(SmallTests.class)
+public class TestKeyValueCodec {
+ @Test
+ public void testEmptyWorks() throws IOException {
+ ByteArrayOutputStream baos = new ByteArrayOutputStream();
+ CountingOutputStream cos = new CountingOutputStream(baos);
+ DataOutputStream dos = new DataOutputStream(cos);
+ KeyValueCodec kvc = new KeyValueCodec();
+ Codec.Encoder encoder = kvc.getEncoder(dos);
+ encoder.flush();
+ dos.close();
+ long offset = cos.getCount();
+ assertEquals(0, offset);
+ CountingInputStream cis = new CountingInputStream(new ByteArrayInputStream(baos.toByteArray()));
+ DataInputStream dis = new DataInputStream(cis);
+ Codec.Decoder decoder = kvc.getDecoder(dis);
+ assertFalse(decoder.advance());
+ dis.close();
+ assertEquals(0, cis.getCount());
+ }
+
+ @Test
+ public void testOne() throws IOException {
+ ByteArrayOutputStream baos = new ByteArrayOutputStream();
+ CountingOutputStream cos = new CountingOutputStream(baos);
+ DataOutputStream dos = new DataOutputStream(cos);
+ KeyValueCodec kvc = new KeyValueCodec();
+ Codec.Encoder encoder = kvc.getEncoder(dos);
+    final KeyValue kv = new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"),
+      Bytes.toBytes("q"), Bytes.toBytes("v"));
+ final long length = kv.getLength() + Bytes.SIZEOF_INT;
+ encoder.write(kv);
+ encoder.flush();
+ dos.close();
+ long offset = cos.getCount();
+ assertEquals(length, offset);
+ CountingInputStream cis = new CountingInputStream(new ByteArrayInputStream(baos.toByteArray()));
+ DataInputStream dis = new DataInputStream(cis);
+ Codec.Decoder decoder = kvc.getDecoder(dis);
+ assertTrue(decoder.advance()); // First read should pull in the KV
+ assertFalse(decoder.advance()); // Second read should trip over the end-of-stream marker and return false
+ dis.close();
+ assertEquals(length, cis.getCount());
+ }
+
+ @Test
+ public void testThree() throws IOException {
+ ByteArrayOutputStream baos = new ByteArrayOutputStream();
+ CountingOutputStream cos = new CountingOutputStream(baos);
+ DataOutputStream dos = new DataOutputStream(cos);
+ KeyValueCodec kvc = new KeyValueCodec();
+ Codec.Encoder encoder = kvc.getEncoder(dos);
+    final KeyValue kv1 = new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"),
+      Bytes.toBytes("1"), Bytes.toBytes("1"));
+    final KeyValue kv2 = new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"),
+      Bytes.toBytes("2"), Bytes.toBytes("2"));
+    final KeyValue kv3 = new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"),
+      Bytes.toBytes("3"), Bytes.toBytes("3"));
+ final long length = kv1.getLength() + Bytes.SIZEOF_INT;
+ encoder.write(kv1);
+ encoder.write(kv2);
+ encoder.write(kv3);
+ encoder.flush();
+ dos.close();
+ long offset = cos.getCount();
+ assertEquals(length * 3, offset);
+ CountingInputStream cis = new CountingInputStream(new ByteArrayInputStream(baos.toByteArray()));
+ DataInputStream dis = new DataInputStream(cis);
+ Codec.Decoder decoder = kvc.getDecoder(dis);
+ assertTrue(decoder.advance());
+ KeyValue kv = (KeyValue)decoder.current();
+ assertTrue(kv1.equals(kv));
+ assertTrue(decoder.advance());
+ kv = (KeyValue)decoder.current();
+ assertTrue(kv2.equals(kv));
+ assertTrue(decoder.advance());
+ kv = (KeyValue)decoder.current();
+ assertTrue(kv3.equals(kv));
+ assertFalse(decoder.advance());
+ dis.close();
+ assertEquals((length * 3), cis.getCount());
+ }
+}
\ No newline at end of file
diff --git a/hbase-prefix-tree/src/main/java/org/apache/hbase/codec/prefixtree/PrefixTreeCodec.java b/hbase-prefix-tree/src/main/java/org/apache/hbase/codec/prefixtree/PrefixTreeCodec.java
index 3c5349c..d6a80b2 100644
--- a/hbase-prefix-tree/src/main/java/org/apache/hbase/codec/prefixtree/PrefixTreeCodec.java
+++ b/hbase-prefix-tree/src/main/java/org/apache/hbase/codec/prefixtree/PrefixTreeCodec.java
@@ -28,7 +28,7 @@ import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValue.KeyComparator;
import org.apache.hadoop.hbase.KeyValue.MetaKeyComparator;
import org.apache.hadoop.hbase.KeyValue.RootKeyComparator;
-import org.apache.hadoop.hbase.KeyValueTool;
+import org.apache.hadoop.hbase.KeyValueUtil;
import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
@@ -97,7 +97,7 @@ public class PrefixTreeCodec implements DataBlockEncoder{
try{
KeyValue kv;
- while ((kv = KeyValueTool.nextShallowCopy(rawKeyValues, includesMvccVersion)) != null) {
+ while ((kv = KeyValueUtil.nextShallowCopy(rawKeyValues, includesMvccVersion)) != null) {
builder.write(kv);
}
builder.flush();
@@ -132,13 +132,13 @@ public class PrefixTreeCodec implements DataBlockEncoder{
CellSearcher searcher = null;
try {
searcher = DecoderFactory.checkOut(sourceAsBuffer, includesMvccVersion);
- while (searcher.next()) {
- KeyValue currentCell = KeyValueTool.copyToNewKeyValue(searcher.getCurrent());
+ while (searcher.advance()) {
+ KeyValue currentCell = KeyValueUtil.copyToNewKeyValue(searcher.current());
// needs to be modified for DirectByteBuffers. no existing methods to
// write VLongs to byte[]
int offset = result.arrayOffset() + result.position();
- KeyValueTool.appendToByteArray(currentCell, result.array(), offset);
- int keyValueLength = KeyValueTool.length(currentCell);
+ KeyValueUtil.appendToByteArray(currentCell, result.array(), offset);
+ int keyValueLength = KeyValueUtil.length(currentCell);
ByteBufferUtils.skip(result, keyValueLength);
offset += keyValueLength;
if (includesMvccVersion) {
@@ -163,7 +163,7 @@ public class PrefixTreeCodec implements DataBlockEncoder{
if (!searcher.positionAtFirstCell()) {
return null;
}
- return KeyValueTool.copyKeyToNewByteBuffer(searcher.getCurrent());
+ return KeyValueUtil.copyKeyToNewByteBuffer(searcher.current());
} finally {
DecoderFactory.checkIn(searcher);
}
diff --git a/hbase-prefix-tree/src/main/java/org/apache/hbase/codec/prefixtree/PrefixTreeSeeker.java b/hbase-prefix-tree/src/main/java/org/apache/hbase/codec/prefixtree/PrefixTreeSeeker.java
index fe8d155..ae8df97 100644
--- a/hbase-prefix-tree/src/main/java/org/apache/hbase/codec/prefixtree/PrefixTreeSeeker.java
+++ b/hbase-prefix-tree/src/main/java/org/apache/hbase/codec/prefixtree/PrefixTreeSeeker.java
@@ -22,13 +22,13 @@ import java.nio.ByteBuffer;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.KeyValueTool;
+import org.apache.hadoop.hbase.KeyValueUtil;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoder.EncodedSeeker;
import org.apache.hbase.Cell;
-import org.apache.hbase.cell.CellScannerPosition;
-import org.apache.hbase.cell.CellTool;
+import org.apache.hbase.CellUtil;
import org.apache.hbase.codec.prefixtree.decode.DecoderFactory;
import org.apache.hbase.codec.prefixtree.decode.PrefixTreeArraySearcher;
+import org.apache.hbase.codec.prefixtree.scanner.CellScannerPosition;
/**
* These methods have the same definition as any implementation of the EncodedSeeker.
@@ -69,13 +69,13 @@ public class PrefixTreeSeeker implements EncodedSeeker {
@Override
public ByteBuffer getKeyDeepCopy() {
- return KeyValueTool.copyKeyToNewByteBuffer(ptSearcher.getCurrent());
+ return KeyValueUtil.copyKeyToNewByteBuffer(ptSearcher.current());
}
@Override
public ByteBuffer getValueShallowCopy() {
- return CellTool.getValueBufferShallowCopy(ptSearcher.getCurrent());
+ return CellUtil.getValueBufferShallowCopy(ptSearcher.current());
}
/**
@@ -83,7 +83,7 @@ public class PrefixTreeSeeker implements EncodedSeeker {
*/
@Override
public ByteBuffer getKeyValueBuffer() {
- return KeyValueTool.copyToNewByteBuffer(ptSearcher.getCurrent());
+ return KeyValueUtil.copyToNewByteBuffer(ptSearcher.current());
}
/**
@@ -91,7 +91,7 @@ public class PrefixTreeSeeker implements EncodedSeeker {
*/
@Override
public KeyValue getKeyValue() {
- return KeyValueTool.copyToNewKeyValue(ptSearcher.getCurrent());
+ return KeyValueUtil.copyToNewKeyValue(ptSearcher.current());
}
/**
@@ -104,9 +104,8 @@ public class PrefixTreeSeeker implements EncodedSeeker {
* The goal will be to transition the upper layers of HBase, like Filters and KeyValueHeap, to use
* this method instead of the getKeyValue() methods above.
*/
-// @Override
- public Cell getCurrent() {
- return ptSearcher.getCurrent();
+ public Cell get() {
+ return ptSearcher.current();
}
@Override
@@ -116,12 +115,12 @@ public class PrefixTreeSeeker implements EncodedSeeker {
@Override
public boolean next() {
- return ptSearcher.next();
+ return ptSearcher.advance();
}
// @Override
public boolean advance() {
- return ptSearcher.next();
+ return ptSearcher.advance();
}
diff --git a/hbase-prefix-tree/src/main/java/org/apache/hbase/codec/prefixtree/decode/PrefixTreeArrayScanner.java b/hbase-prefix-tree/src/main/java/org/apache/hbase/codec/prefixtree/decode/PrefixTreeArrayScanner.java
index 787cbc2..c79a97d 100644
--- a/hbase-prefix-tree/src/main/java/org/apache/hbase/codec/prefixtree/decode/PrefixTreeArrayScanner.java
+++ b/hbase-prefix-tree/src/main/java/org/apache/hbase/codec/prefixtree/decode/PrefixTreeArrayScanner.java
@@ -20,13 +20,13 @@ package org.apache.hbase.codec.prefixtree.decode;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hbase.Cell;
-import org.apache.hbase.cell.CellComparator;
+import org.apache.hbase.CellComparator;
+import org.apache.hbase.CellScanner;
import org.apache.hbase.codec.prefixtree.PrefixTreeBlockMeta;
import org.apache.hbase.codec.prefixtree.decode.column.ColumnReader;
import org.apache.hbase.codec.prefixtree.decode.row.RowNodeReader;
import org.apache.hbase.codec.prefixtree.decode.timestamp.MvccVersionDecoder;
import org.apache.hbase.codec.prefixtree.decode.timestamp.TimestampDecoder;
-import org.apache.hbase.codec.prefixtree.scanner.CellScanner;
/**
* Extends PtCell and manipulates its protected fields. Could alternatively contain a PtCell and
@@ -111,7 +111,7 @@ public class PrefixTreeArrayScanner extends PrefixTreeCell implements CellScanne
resetToBeforeFirstEntry();
}
- @Override
+ // Does this have to be in the CellScanner Interface? TODO
public void resetToBeforeFirstEntry() {
beforeFirst = true;
afterLast = false;
@@ -142,14 +142,13 @@ public class PrefixTreeArrayScanner extends PrefixTreeCell implements CellScanne
/********************** CellScanner **********************/
@Override
- public PrefixTreeCell getCurrent() {
+ public Cell current() {
if(isOutOfBounds()){
return null;
}
- return this;
+ return (Cell)this;
}
-
/******************* Object methods ************************/
@Override
@@ -168,11 +167,11 @@ public class PrefixTreeArrayScanner extends PrefixTreeCell implements CellScanne
*/
@Override
public String toString() {
- PrefixTreeCell currentCell = getCurrent();
+ Cell currentCell = current();
if(currentCell==null){
return "null";
}
- return currentCell.getKeyValueString();
+ return ((PrefixTreeCell)currentCell).getKeyValueString();
}
@@ -180,11 +179,11 @@ public class PrefixTreeArrayScanner extends PrefixTreeCell implements CellScanne
public boolean positionAtFirstCell() {
reInitFirstNode();
- return next();
+ return advance();
}
@Override
- public boolean next() {
+ public boolean advance() {
if (afterLast) {
return false;
}
diff --git a/hbase-prefix-tree/src/main/java/org/apache/hbase/codec/prefixtree/decode/PrefixTreeArraySearcher.java b/hbase-prefix-tree/src/main/java/org/apache/hbase/codec/prefixtree/decode/PrefixTreeArraySearcher.java
index 56e33bd..b39cc77 100644
--- a/hbase-prefix-tree/src/main/java/org/apache/hbase/codec/prefixtree/decode/PrefixTreeArraySearcher.java
+++ b/hbase-prefix-tree/src/main/java/org/apache/hbase/codec/prefixtree/decode/PrefixTreeArraySearcher.java
@@ -20,9 +20,9 @@ package org.apache.hbase.codec.prefixtree.decode;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hbase.Cell;
-import org.apache.hbase.cell.CellScannerPosition;
-import org.apache.hbase.cell.CellTool;
+import org.apache.hbase.CellUtil;
import org.apache.hbase.codec.prefixtree.PrefixTreeBlockMeta;
+import org.apache.hbase.codec.prefixtree.scanner.CellScannerPosition;
import org.apache.hbase.codec.prefixtree.scanner.CellSearcher;
import com.google.common.primitives.UnsignedBytes;
@@ -87,7 +87,7 @@ public class PrefixTreeArraySearcher extends PrefixTreeArrayReversibleScanner im
}
//keep hunting for the rest of the row
- byte searchForByte = CellTool.getRowByte(key, currentNodeDepth);
+ byte searchForByte = CellUtil.getRowByte(key, currentNodeDepth);
fanIndex = currentRowNode.whichFanNode(searchForByte);
if(fanIndex < 0){//no matching row. return early
int insertionPoint = -fanIndex;
@@ -132,7 +132,7 @@ public class PrefixTreeArraySearcher extends PrefixTreeArrayReversibleScanner im
}
//keep hunting for the rest of the row
- byte searchForByte = CellTool.getRowByte(key, currentNodeDepth);
+ byte searchForByte = CellUtil.getRowByte(key, currentNodeDepth);
fanIndex = currentRowNode.whichFanNode(searchForByte);
if(fanIndex < 0){//no matching row. return early
int insertionPoint = -fanIndex;
@@ -234,7 +234,7 @@ public class PrefixTreeArraySearcher extends PrefixTreeArrayReversibleScanner im
if (beforeOnMiss) {
return CellScannerPosition.BEFORE;
}
- if (next()) {
+ if (advance()) {
return CellScannerPosition.AFTER;
}
return CellScannerPosition.AFTER_LAST;
@@ -279,7 +279,7 @@ public class PrefixTreeArraySearcher extends PrefixTreeArrayReversibleScanner im
if (i >= key.getRowLength()) {// key was shorter, so it's first
return -1;
}
- byte keyByte = CellTool.getRowByte(key, i);
+ byte keyByte = CellUtil.getRowByte(key, i);
byte thisByte = rowBuffer[i];
if (keyByte == thisByte) {
continue;
diff --git a/hbase-prefix-tree/src/main/java/org/apache/hbase/codec/prefixtree/decode/PrefixTreeCell.java b/hbase-prefix-tree/src/main/java/org/apache/hbase/codec/prefixtree/decode/PrefixTreeCell.java
index d34014b..040bf19 100644
--- a/hbase-prefix-tree/src/main/java/org/apache/hbase/codec/prefixtree/decode/PrefixTreeCell.java
+++ b/hbase-prefix-tree/src/main/java/org/apache/hbase/codec/prefixtree/decode/PrefixTreeCell.java
@@ -20,9 +20,9 @@ package org.apache.hbase.codec.prefixtree.decode;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.KeyValueTool;
+import org.apache.hadoop.hbase.KeyValueUtil;
import org.apache.hbase.Cell;
-import org.apache.hbase.cell.CellComparator;
+import org.apache.hbase.CellComparator;
/**
* As the PrefixTreeArrayScanner moves through the tree bytes, it changes the values in the fields
@@ -95,7 +95,7 @@ public class PrefixTreeCell implements Cell, Comparable {
public int hashCode(){
//Temporary hack to maintain backwards compatibility with KeyValue.hashCode
//I don't think this is used in any hot code paths
- return KeyValueTool.copyToNewKeyValue(this).hashCode();
+ return KeyValueUtil.copyToNewKeyValue(this).hashCode();
//TODO return CellComparator.hashCode(this);//see HBASE-6907
}
@@ -190,7 +190,7 @@ public class PrefixTreeCell implements Cell, Comparable {
* Need this separate method so we can call it from subclasses' toString() methods
*/
protected String getKeyValueString(){
- KeyValue kv = KeyValueTool.copyToNewKeyValue(this);
+ KeyValue kv = KeyValueUtil.copyToNewKeyValue(this);
return kv.toString();
}
diff --git a/hbase-prefix-tree/src/main/java/org/apache/hbase/codec/prefixtree/encode/PrefixTreeEncoder.java b/hbase-prefix-tree/src/main/java/org/apache/hbase/codec/prefixtree/encode/PrefixTreeEncoder.java
index 8adf110..af91cd3 100644
--- a/hbase-prefix-tree/src/main/java/org/apache/hbase/codec/prefixtree/encode/PrefixTreeEncoder.java
+++ b/hbase-prefix-tree/src/main/java/org/apache/hbase/codec/prefixtree/encode/PrefixTreeEncoder.java
@@ -24,19 +24,19 @@ import java.io.OutputStream;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.KeyValueTool;
+import org.apache.hadoop.hbase.KeyValueUtil;
import org.apache.hadoop.hbase.util.ArrayUtils;
import org.apache.hadoop.hbase.util.ByteRange;
import org.apache.hadoop.io.WritableUtils;
import org.apache.hbase.Cell;
-import org.apache.hbase.cell.CellOutputStream;
-import org.apache.hbase.cell.CellTool;
+import org.apache.hbase.CellUtil;
import org.apache.hbase.codec.prefixtree.PrefixTreeBlockMeta;
import org.apache.hbase.codec.prefixtree.encode.column.ColumnSectionWriter;
import org.apache.hbase.codec.prefixtree.encode.other.CellTypeEncoder;
import org.apache.hbase.codec.prefixtree.encode.other.LongEncoder;
import org.apache.hbase.codec.prefixtree.encode.row.RowSectionWriter;
import org.apache.hbase.codec.prefixtree.encode.tokenize.Tokenizer;
+import org.apache.hbase.io.CellOutputStream;
import org.apache.hbase.util.byterange.ByteRangeSet;
import org.apache.hbase.util.byterange.impl.ByteRangeHashSet;
import org.apache.hbase.util.byterange.impl.ByteRangeTreeSet;
@@ -255,7 +255,7 @@ public class PrefixTreeEncoder implements CellOutputStream {
public void write(Cell cell) {
ensurePerCellCapacities();
- rowTokenizer.addSorted(CellTool.fillRowRange(cell, rowRange));
+ rowTokenizer.addSorted(CellUtil.fillRowRange(cell, rowRange));
addFamilyPart(cell);
addQualifierPart(cell);
addAfterRowFamilyQualifier(cell);
@@ -291,26 +291,26 @@ public class PrefixTreeEncoder implements CellOutputStream {
totalValueBytes += cell.getValueLength();
// double the array each time we run out of space
values = ArrayUtils.growIfNecessary(values, totalValueBytes, 2 * totalValueBytes);
- CellTool.copyValueTo(cell, values, valueOffsets[totalCells]);
+ CellUtil.copyValueTo(cell, values, valueOffsets[totalCells]);
if (cell.getValueLength() > maxValueLength) {
maxValueLength = cell.getValueLength();
}
valueOffsets[totalCells + 1] = totalValueBytes;
// general
- totalUnencodedBytes += KeyValueTool.length(cell);
+ totalUnencodedBytes += KeyValueUtil.length(cell);
++totalCells;
}
private void addFamilyPart(Cell cell) {
if (MULITPLE_FAMILIES_POSSIBLE || totalCells == 0) {
- CellTool.fillFamilyRange(cell, familyRange);
+ CellUtil.fillFamilyRange(cell, familyRange);
familyDeduplicator.add(familyRange);
}
}
private void addQualifierPart(Cell cell) {
- CellTool.fillQualifierRange(cell, qualifierRange);
+ CellUtil.fillQualifierRange(cell, qualifierRange);
qualifierDeduplicator.add(qualifierRange);
}
diff --git a/hbase-prefix-tree/src/main/java/org/apache/hbase/codec/prefixtree/scanner/CellScanner.java b/hbase-prefix-tree/src/main/java/org/apache/hbase/codec/prefixtree/scanner/CellScanner.java
index 29c9ff4..e69de29 100644
--- a/hbase-prefix-tree/src/main/java/org/apache/hbase/codec/prefixtree/scanner/CellScanner.java
+++ b/hbase-prefix-tree/src/main/java/org/apache/hbase/codec/prefixtree/scanner/CellScanner.java
@@ -1,71 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hbase.codec.prefixtree.scanner;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hbase.Cell;
-
-/**
- * Alternate name may be CellInputStream
- *
- * An interface for iterating through a sequence of cells. Similar to Java's Iterator, but without
- * the hasNext() or remove() methods. The hasNext() method is problematic because it may require
- * actually loading the next object, which in turn requires storing the previous object somewhere.
- * The core data block decoder should be as fast as possible, so we push the complexity and
- * performance expense of concurrently tracking multiple cells to layers above the CellScanner.
- *
- * The getCurrentCell() method will return a reference to a Cell implementation. This reference may
- * or may not point to a reusable cell implementation, so users of the CellScanner should not, for
- * example, accumulate a List of Cells. All of the references may point to the same object, which
- * would be the latest state of the underlying Cell. In short, the Cell is mutable.
- *
- * At a minimum, an implementation will need to be able to advance from one cell to the next in a
- * LinkedList fashion. The nextQualifier(), nextFamily(), and nextRow() methods can all be
- * implemented by calling nextCell(), however, if the DataBlockEncoding supports random access into
- * the block then it may provide smarter versions of these methods.
- *
- * Typical usage:
- *
- *
- * while (scanner.nextCell()) {
- * Cell cell = scanner.getCurrentCell();
- * // do something
- * }
- *
- */
-@InterfaceAudience.Private
-public interface CellScanner{
-
- /**
- * Reset any state in the scanner so it appears it was freshly opened.
- */
- void resetToBeforeFirstEntry();
-
- /**
- * @return the current Cell which may be mutable
- */
- Cell getCurrent();
-
- /**
- * Advance the scanner 1 cell.
- * @return true if the next cell is found and getCurrentCell() will return a valid Cell
- */
- boolean next();
-
-}
diff --git a/hbase-prefix-tree/src/main/java/org/apache/hbase/codec/prefixtree/scanner/CellScannerPosition.java b/hbase-prefix-tree/src/main/java/org/apache/hbase/codec/prefixtree/scanner/CellScannerPosition.java
new file mode 100644
index 0000000..612e93e
--- /dev/null
+++ b/hbase-prefix-tree/src/main/java/org/apache/hbase/codec/prefixtree/scanner/CellScannerPosition.java
@@ -0,0 +1,68 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hbase.codec.prefixtree.scanner;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * An indicator of the state of the scanner after an operation such as advance() or positionAt(..).
+ * For example:
+ *
+ * - In a DataBlockScanner, the AFTER_LAST position indicates to the parent StoreFileScanner that
+ * it should load the next block.
+ * - In a StoreFileScanner, the AFTER_LAST position indicates that the file has been exhausted.
+ * - In a RegionScanner, the AFTER_LAST position indicates that the scanner should move to the
+ * next region.
+ *
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public enum CellScannerPosition {
+
+ /**
+   * current() will NOT return a valid cell. Calling advance() will advance to the first
+ * cell.
+ */
+ BEFORE_FIRST,
+
+ /**
+   * current() will return a valid cell, but it is not the cell requested by positionAt(..),
+ * rather it is the nearest cell before the requested cell.
+ */
+ BEFORE,
+
+ /**
+   * current() will return a valid cell, and it is exactly the cell that was requested by
+ * positionAt(..).
+ */
+ AT,
+
+ /**
+   * current() will return a valid cell, but it is not the cell requested by positionAt(..),
+ * rather it is the nearest cell after the requested cell.
+ */
+ AFTER,
+
+ /**
+   * current() will NOT return a valid cell. Calling advance() will have no effect.
+ */
+ AFTER_LAST
+
+}
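
A hypothetical caller sketch showing how these positions drive control flow after a seek (SeekExample and its null-return convention are invented for illustration):

    import org.apache.hbase.Cell;
    import org.apache.hbase.codec.prefixtree.scanner.CellScannerPosition;
    import org.apache.hbase.codec.prefixtree.scanner.CellSearcher;

    class SeekExample {
      static Cell seekAtOrAfter(final CellSearcher searcher, final Cell key) {
        CellScannerPosition position = searcher.positionAtOrAfter(key);
        if (position == CellScannerPosition.AFTER_LAST) {
          return null; // block exhausted; a StoreFileScanner would load the next block
        }
        // AT or AFTER: current() is valid and is the requested cell or the next one.
        return searcher.current();
      }
    }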
diff --git a/hbase-prefix-tree/src/main/java/org/apache/hbase/codec/prefixtree/scanner/CellSearcher.java b/hbase-prefix-tree/src/main/java/org/apache/hbase/codec/prefixtree/scanner/CellSearcher.java
index 7c8269f..77aca62 100644
--- a/hbase-prefix-tree/src/main/java/org/apache/hbase/codec/prefixtree/scanner/CellSearcher.java
+++ b/hbase-prefix-tree/src/main/java/org/apache/hbase/codec/prefixtree/scanner/CellSearcher.java
@@ -20,7 +20,6 @@ package org.apache.hbase.codec.prefixtree.scanner;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hbase.Cell;
-import org.apache.hbase.cell.CellScannerPosition;
/**
* Methods for seeking to a random {@link Cell} inside a sorted collection of cells. Indicates that
@@ -28,6 +27,10 @@ import org.apache.hbase.cell.CellScannerPosition;
*/
@InterfaceAudience.Private
public interface CellSearcher extends ReversibleCellScanner {
+ /**
+ * Reset any state in the scanner so it appears it was freshly opened.
+ */
+ void resetToBeforeFirstEntry();
/**
* Do everything within this scanner's power to find the key. Look forward and backwards.
@@ -62,7 +65,7 @@ public interface CellSearcher extends ReversibleCellScanner {
CellScannerPosition positionAtOrAfter(Cell key);
/**
- * Note: Added for backwards compatibility with
+ * Note: Added for backwards compatibility with
* {@link org.apache.hadoop.hbase.regionserver.KeyValueScanner#reseek}
*
* Look for the key, but only look after the current position. Probably not needed for an
diff --git a/hbase-prefix-tree/src/main/java/org/apache/hbase/codec/prefixtree/scanner/ReversibleCellScanner.java b/hbase-prefix-tree/src/main/java/org/apache/hbase/codec/prefixtree/scanner/ReversibleCellScanner.java
index 9a6c37c..4714ade 100644
--- a/hbase-prefix-tree/src/main/java/org/apache/hbase/codec/prefixtree/scanner/ReversibleCellScanner.java
+++ b/hbase-prefix-tree/src/main/java/org/apache/hbase/codec/prefixtree/scanner/ReversibleCellScanner.java
@@ -19,6 +19,7 @@
package org.apache.hbase.codec.prefixtree.scanner;
import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hbase.CellScanner;
/**
* An extension of CellScanner indicating the scanner supports iterating backwards through cells.
@@ -35,7 +36,7 @@ public interface ReversibleCellScanner extends CellScanner {
* @return true if the operation was successful, meaning getCurrentCell() will return a valid
* Cell.
* false if there were no previous cells, meaning getCurrentCell() will return null.
- * Scanner position will be {@link org.apache.hbase.cell.CellScannerPosition#BEFORE_FIRST}
+ * Scanner position will be {@link org.apache.hbase.codec.prefixtree.scanner.CellScannerPosition#BEFORE_FIRST}
*/
boolean previous();
@@ -45,7 +46,7 @@ public interface ReversibleCellScanner extends CellScanner {
* @return true if the operation was successful, meaning getCurrentCell() will return a valid
* Cell.
* false if there were no previous cells, meaning getCurrentCell() will return null.
- * Scanner position will be {@link org.apache.hbase.cell.CellScannerPosition#BEFORE_FIRST}
+ * Scanner position will be {@link org.apache.hbase.codec.prefixtree.scanner.CellScannerPosition#BEFORE_FIRST}
*/
boolean previousRow(boolean endOfRow);
diff --git a/hbase-prefix-tree/src/test/java/org/apache/hbase/codec/prefixtree/row/BaseTestRowData.java b/hbase-prefix-tree/src/test/java/org/apache/hbase/codec/prefixtree/row/BaseTestRowData.java
index 1df6b05..716b7f3 100644
--- a/hbase-prefix-tree/src/test/java/org/apache/hbase/codec/prefixtree/row/BaseTestRowData.java
+++ b/hbase-prefix-tree/src/test/java/org/apache/hbase/codec/prefixtree/row/BaseTestRowData.java
@@ -21,7 +21,7 @@ package org.apache.hbase.codec.prefixtree.row;
import java.util.List;
import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hbase.cell.CellComparator;
+import org.apache.hbase.CellComparator;
import org.apache.hbase.codec.prefixtree.PrefixTreeBlockMeta;
import org.apache.hbase.codec.prefixtree.scanner.CellSearcher;
diff --git a/hbase-prefix-tree/src/test/java/org/apache/hbase/codec/prefixtree/row/TestPrefixTreeSearcher.java b/hbase-prefix-tree/src/test/java/org/apache/hbase/codec/prefixtree/row/TestPrefixTreeSearcher.java
index 8490040..9e05bf3 100644
--- a/hbase-prefix-tree/src/test/java/org/apache/hbase/codec/prefixtree/row/TestPrefixTreeSearcher.java
+++ b/hbase-prefix-tree/src/test/java/org/apache/hbase/codec/prefixtree/row/TestPrefixTreeSearcher.java
@@ -25,13 +25,13 @@ import java.util.Collection;
import java.util.List;
import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.KeyValueTool;
+import org.apache.hadoop.hbase.KeyValueUtil;
import org.apache.hadoop.hbase.util.CollectionUtils;
import org.apache.hbase.Cell;
-import org.apache.hbase.cell.CellComparator;
-import org.apache.hbase.cell.CellScannerPosition;
+import org.apache.hbase.CellComparator;
import org.apache.hbase.codec.prefixtree.decode.DecoderFactory;
import org.apache.hbase.codec.prefixtree.encode.PrefixTreeEncoder;
+import org.apache.hbase.codec.prefixtree.scanner.CellScannerPosition;
import org.apache.hbase.codec.prefixtree.scanner.CellSearcher;
import org.junit.Assert;
import org.junit.Test;
@@ -72,10 +72,10 @@ public class TestPrefixTreeSearcher {
searcher = DecoderFactory.checkOut(block, true);
int i = -1;
- while (searcher.next()) {
+ while (searcher.advance()) {
++i;
KeyValue inputCell = rows.getInputs().get(i);
- Cell outputCell = searcher.getCurrent();
+ Cell outputCell = searcher.current();
// check all 3 permutations of equals()
Assert.assertEquals(inputCell, outputCell);
@@ -100,7 +100,7 @@ public class TestPrefixTreeSearcher {
++i;
int oppositeIndex = rows.getInputs().size() - i - 1;
KeyValue inputKv = rows.getInputs().get(oppositeIndex);
- KeyValue outputKv = KeyValueTool.copyToNewKeyValue(searcher.getCurrent());
+ KeyValue outputKv = KeyValueUtil.copyToNewKeyValue(searcher.current());
Assert.assertEquals(inputKv, outputKv);
}
Assert.assertEquals(rows.getInputs().size(), i + 1);
@@ -118,7 +118,7 @@ public class TestPrefixTreeSearcher {
for (KeyValue kv : rows.getInputs()) {
boolean hit = searcher.positionAt(kv);
Assert.assertTrue(hit);
- Cell foundKv = searcher.getCurrent();
+ Cell foundKv = searcher.current();
Assert.assertTrue(CellComparator.equals(kv, foundKv));
}
} finally {
@@ -139,7 +139,7 @@ public class TestPrefixTreeSearcher {
KeyValue kv = rows.getInputs().get(i);
//nextRow
- KeyValue inputNextRow = KeyValueTool.createFirstKeyInNextRow(kv);
+ KeyValue inputNextRow = KeyValueUtil.createFirstKeyInNextRow(kv);
CellScannerPosition position = searcher.positionAtOrBefore(inputNextRow);
boolean isFirstInRow = rowStartIndexes.contains(i);
@@ -158,7 +158,7 @@ public class TestPrefixTreeSearcher {
}
//previous KV
- KeyValue inputPreviousKv = KeyValueTool.previousKey(kv);
+ KeyValue inputPreviousKv = KeyValueUtil.previousKey(kv);
boolean hit = searcher.positionAt(inputPreviousKv);
Assert.assertFalse(hit);
position = searcher.positionAtOrAfter(inputPreviousKv);
@@ -169,7 +169,7 @@ public class TestPrefixTreeSearcher {
/*
* TODO: why i+1 instead of i?
*/
- Assert.assertEquals(rows.getInputs().get(i+1), searcher.getCurrent());
+ Assert.assertEquals(rows.getInputs().get(i+1), searcher.current());
}
}
} finally {
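
The churn in this test is two renames: KeyValueTool became KeyValueUtil, and the searcher iteration pair next()/getCurrent() became advance()/current(). A minimal sketch of the updated forward-scan idiom, assuming a CellSearcher checked out from DecoderFactory as above (the demo class and drain helper are hypothetical):

    import java.util.ArrayList;
    import java.util.List;
    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.KeyValueUtil;
    import org.apache.hbase.Cell;
    import org.apache.hbase.codec.prefixtree.scanner.CellSearcher;

    public class ForwardScanSketch {                  // hypothetical demo class
      /** Drain a searcher into materialized KeyValues. */
      static List<KeyValue> drain(CellSearcher searcher) {
        List<KeyValue> out = new ArrayList<KeyValue>();
        while (searcher.advance()) {                  // was searcher.next()
          Cell c = searcher.current();                // was searcher.getCurrent()
          out.add(KeyValueUtil.copyToNewKeyValue(c)); // was KeyValueTool
        }
        return out;
      }
    }
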
diff --git a/hbase-prefix-tree/src/test/java/org/apache/hbase/codec/prefixtree/row/TestRowEncoder.java b/hbase-prefix-tree/src/test/java/org/apache/hbase/codec/prefixtree/row/TestRowEncoder.java
index ad4a6b4..8c102f5 100644
--- a/hbase-prefix-tree/src/test/java/org/apache/hbase/codec/prefixtree/row/TestRowEncoder.java
+++ b/hbase-prefix-tree/src/test/java/org/apache/hbase/codec/prefixtree/row/TestRowEncoder.java
@@ -26,7 +26,7 @@ import java.util.Collection;
import java.util.List;
import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.KeyValueTool;
+import org.apache.hadoop.hbase.KeyValueUtil;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hbase.Cell;
import org.apache.hbase.codec.prefixtree.PrefixTreeBlockMeta;
@@ -105,10 +105,10 @@ public class TestRowEncoder {
@Test
public void testForwardScanner() {
int counter = -1;
- while (searcher.next()) {
+ while (searcher.advance()) {
++counter;
KeyValue inputKv = rows.getInputs().get(counter);
- KeyValue outputKv = KeyValueTool.copyToNewKeyValue(searcher.getCurrent());
+ KeyValue outputKv = KeyValueUtil.copyToNewKeyValue(searcher.current());
assertKeyAndValueEqual(inputKv, outputKv);
}
// assert same number of cells
@@ -127,7 +127,7 @@ public class TestRowEncoder {
++counter;
int oppositeIndex = rows.getInputs().size() - counter - 1;
KeyValue inputKv = rows.getInputs().get(oppositeIndex);
- KeyValue outputKv = KeyValueTool.copyToNewKeyValue(searcher.getCurrent());
+ KeyValue outputKv = KeyValueUtil.copyToNewKeyValue(searcher.current());
assertKeyAndValueEqual(inputKv, outputKv);
}
Assert.assertEquals(rows.getInputs().size(), counter + 1);
@@ -151,13 +151,13 @@ public class TestRowEncoder {
// a next+previous should cancel out
if (!searcher.isAfterLast()) {
- searcher.next();
+ searcher.advance();
searcher.previous();
}
int oppositeIndex = rows.getInputs().size() - counter - 1;
KeyValue inputKv = rows.getInputs().get(oppositeIndex);
- KeyValue outputKv = KeyValueTool.copyToNewKeyValue(searcher.getCurrent());
+ KeyValue outputKv = KeyValueUtil.copyToNewKeyValue(searcher.current());
assertKeyAndValueEqual(inputKv, outputKv);
}
Assert.assertEquals(rows.getInputs().size(), counter + 1);
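
The reverse-scan test keeps its invariant that advance() followed by previous() cancels out when the scanner is not already past the end. A hedged sketch of that check; the stand-in interface below mirrors the methods the test's searcher uses, since the exact declaring type is not shown in this hunk:

    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.KeyValueUtil;
    import org.apache.hbase.Cell;

    public class CancelOutSketch {                    // hypothetical demo class
      /** Stand-in for the test's searcher type (methods as used above). */
      interface ReversibleSearcher {
        boolean advance();
        boolean previous();
        boolean isAfterLast();
        Cell current();
      }

      /** advance() then previous() should land back on the same cell. */
      static boolean advanceThenPreviousCancels(ReversibleSearcher searcher) {
        // Copy first: the searcher may reuse its current-cell object.
        KeyValue before = KeyValueUtil.copyToNewKeyValue(searcher.current());
        if (!searcher.isAfterLast()) {
          searcher.advance();
          searcher.previous();
        }
        return before.equals(KeyValueUtil.copyToNewKeyValue(searcher.current()));
      }
    }
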
diff --git a/hbase-prefix-tree/src/test/java/org/apache/hbase/codec/prefixtree/row/data/TestRowDataDeeper.java b/hbase-prefix-tree/src/test/java/org/apache/hbase/codec/prefixtree/row/data/TestRowDataDeeper.java
index f8fba5f..4a80507 100644
--- a/hbase-prefix-tree/src/test/java/org/apache/hbase/codec/prefixtree/row/data/TestRowDataDeeper.java
+++ b/hbase-prefix-tree/src/test/java/org/apache/hbase/codec/prefixtree/row/data/TestRowDataDeeper.java
@@ -22,9 +22,9 @@ import java.util.List;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hbase.cell.CellScannerPosition;
import org.apache.hbase.codec.prefixtree.PrefixTreeBlockMeta;
import org.apache.hbase.codec.prefixtree.row.BaseTestRowData;
+import org.apache.hbase.codec.prefixtree.scanner.CellScannerPosition;
import org.apache.hbase.codec.prefixtree.scanner.CellSearcher;
import org.junit.Assert;
@@ -75,9 +75,9 @@ public class TestRowDataDeeper extends BaseTestRowData{
KeyValue cfcRow = KeyValue.createFirstOnRow(Bytes.toBytes("cfc"));
CellScannerPosition position = searcher.positionAtOrAfter(cfcRow);
Assert.assertEquals(CellScannerPosition.AFTER, position);
- Assert.assertEquals(d.get(2), searcher.getCurrent());
+ Assert.assertEquals(d.get(2), searcher.current());
searcher.previous();
- Assert.assertEquals(d.get(1), searcher.getCurrent());
+ Assert.assertEquals(d.get(1), searcher.current());
}
}
diff --git a/hbase-prefix-tree/src/test/java/org/apache/hbase/codec/prefixtree/row/data/TestRowDataNumberStrings.java b/hbase-prefix-tree/src/test/java/org/apache/hbase/codec/prefixtree/row/data/TestRowDataNumberStrings.java
index 515fb57..f960527 100644
--- a/hbase-prefix-tree/src/test/java/org/apache/hbase/codec/prefixtree/row/data/TestRowDataNumberStrings.java
+++ b/hbase-prefix-tree/src/test/java/org/apache/hbase/codec/prefixtree/row/data/TestRowDataNumberStrings.java
@@ -24,7 +24,7 @@ import java.util.List;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValue.Type;
import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hbase.cell.CellComparator;
+import org.apache.hbase.CellComparator;
import org.apache.hbase.codec.prefixtree.row.BaseTestRowData;
import com.google.common.collect.Lists;
diff --git a/hbase-prefix-tree/src/test/java/org/apache/hbase/codec/prefixtree/row/data/TestRowDataSearcherRowMiss.java b/hbase-prefix-tree/src/test/java/org/apache/hbase/codec/prefixtree/row/data/TestRowDataSearcherRowMiss.java
index f621141..f9ee21b 100644
--- a/hbase-prefix-tree/src/test/java/org/apache/hbase/codec/prefixtree/row/data/TestRowDataSearcherRowMiss.java
+++ b/hbase-prefix-tree/src/test/java/org/apache/hbase/codec/prefixtree/row/data/TestRowDataSearcherRowMiss.java
@@ -23,9 +23,9 @@ import java.util.List;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hbase.Cell;
-import org.apache.hbase.cell.CellComparator;
-import org.apache.hbase.cell.CellScannerPosition;
+import org.apache.hbase.CellComparator;
import org.apache.hbase.codec.prefixtree.row.BaseTestRowData;
+import org.apache.hbase.codec.prefixtree.scanner.CellScannerPosition;
import org.apache.hbase.codec.prefixtree.scanner.CellSearcher;
import org.junit.Assert;
@@ -66,13 +66,13 @@ public class TestRowDataSearcherRowMiss extends BaseTestRowData{
searcher.resetToBeforeFirstEntry();
//test first cell
- searcher.next();
- Cell first = searcher.getCurrent();
+ searcher.advance();
+ Cell first = searcher.current();
Assert.assertTrue(CellComparator.equals(d.get(0), first));
//test first cell in second row
Assert.assertTrue(searcher.positionAt(d.get(1)));
- Assert.assertTrue(CellComparator.equals(d.get(1), searcher.getCurrent()));
+ Assert.assertTrue(CellComparator.equals(d.get(1), searcher.current()));
testBetween1and2(searcher);
testBetween2and3(searcher);
@@ -94,12 +94,12 @@ public class TestRowDataSearcherRowMiss extends BaseTestRowData{
//test atOrBefore
p = searcher.positionAtOrBefore(betweenAAndAAA);
Assert.assertEquals(CellScannerPosition.BEFORE, p);
- Assert.assertTrue(CellComparator.equals(searcher.getCurrent(), d.get(1)));
+ Assert.assertTrue(CellComparator.equals(searcher.current(), d.get(1)));
//test atOrAfter
p = searcher.positionAtOrAfter(betweenAAndAAA);
Assert.assertEquals(CellScannerPosition.AFTER, p);
- Assert.assertTrue(CellComparator.equals(searcher.getCurrent(), d.get(2)));
+ Assert.assertTrue(CellComparator.equals(searcher.current(), d.get(2)));
}
private void testBetween2and3(CellSearcher searcher){
@@ -112,12 +112,12 @@ public class TestRowDataSearcherRowMiss extends BaseTestRowData{
//test atOrBefore
p = searcher.positionAtOrBefore(betweenAAAndB);
Assert.assertEquals(CellScannerPosition.BEFORE, p);
- Assert.assertTrue(CellComparator.equals(searcher.getCurrent(), d.get(2)));
+ Assert.assertTrue(CellComparator.equals(searcher.current(), d.get(2)));
//test atOrAfter
p = searcher.positionAtOrAfter(betweenAAAndB);
Assert.assertEquals(CellScannerPosition.AFTER, p);
- Assert.assertTrue(CellComparator.equals(searcher.getCurrent(), d.get(3)));
+ Assert.assertTrue(CellComparator.equals(searcher.current(), d.get(3)));
}
}
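
Past the renames, these assertions spell out the CellScannerPosition contract for inexact seeks: positionAtOrBefore answers BEFORE when it settles on the nearest earlier cell, positionAtOrAfter answers AFTER when it settles on the nearest later one, and BEFORE_FIRST/AFTER_LAST mark probes that fall off either end (in the AFTER_LAST case current() returns null, as TestRowDataTrivial below asserts). A minimal sketch of a caller handling the result, using the same imports as these tests (demo class and helper are hypothetical):

    import org.apache.hbase.Cell;
    import org.apache.hbase.codec.prefixtree.scanner.CellScannerPosition;
    import org.apache.hbase.codec.prefixtree.scanner.CellSearcher;

    public class InexactSeekSketch {                  // hypothetical demo class
      /** Seek to {@code probe}, or to the closest cell after it. */
      static Cell seekAtOrAfter(CellSearcher searcher, Cell probe) {
        CellScannerPosition p = searcher.positionAtOrAfter(probe);
        if (p == CellScannerPosition.AFTER_LAST) {
          return null;                // probe sorts past the block; no cell
        }
        // Otherwise the searcher sits on an exact match or on the nearest
        // later cell (position AFTER), mirroring the assertions above.
        return searcher.current();
      }
    }
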
diff --git a/hbase-prefix-tree/src/test/java/org/apache/hbase/codec/prefixtree/row/data/TestRowDataSimple.java b/hbase-prefix-tree/src/test/java/org/apache/hbase/codec/prefixtree/row/data/TestRowDataSimple.java
index b77253f..0be8e8e 100644
--- a/hbase-prefix-tree/src/test/java/org/apache/hbase/codec/prefixtree/row/data/TestRowDataSimple.java
+++ b/hbase-prefix-tree/src/test/java/org/apache/hbase/codec/prefixtree/row/data/TestRowDataSimple.java
@@ -24,9 +24,9 @@ import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.CollectionUtils;
import org.apache.hbase.Cell;
-import org.apache.hbase.cell.CellComparator;
-import org.apache.hbase.cell.CellScannerPosition;
+import org.apache.hbase.CellComparator;
import org.apache.hbase.codec.prefixtree.row.BaseTestRowData;
+import org.apache.hbase.codec.prefixtree.scanner.CellScannerPosition;
import org.apache.hbase.codec.prefixtree.scanner.CellSearcher;
import org.junit.Assert;
@@ -66,13 +66,13 @@ public class TestRowDataSimple extends BaseTestRowData {
searcher.resetToBeforeFirstEntry();
// test first cell
- searcher.next();
- Cell first = searcher.getCurrent();
+ searcher.advance();
+ Cell first = searcher.current();
Assert.assertTrue(CellComparator.equals(d.get(0), first));
// test first cell in second row
Assert.assertTrue(searcher.positionAt(d.get(3)));
- Assert.assertTrue(CellComparator.equals(d.get(3), searcher.getCurrent()));
+ Assert.assertTrue(CellComparator.equals(d.get(3), searcher.current()));
Cell between4And5 = new KeyValue(rowB, cf, cq1, ts - 2, v0);
@@ -82,12 +82,12 @@ public class TestRowDataSimple extends BaseTestRowData {
// test atOrBefore
p = searcher.positionAtOrBefore(between4And5);
Assert.assertEquals(CellScannerPosition.BEFORE, p);
- Assert.assertTrue(CellComparator.equals(searcher.getCurrent(), d.get(4)));
+ Assert.assertTrue(CellComparator.equals(searcher.current(), d.get(4)));
// test atOrAfter
p = searcher.positionAtOrAfter(between4And5);
Assert.assertEquals(CellScannerPosition.AFTER, p);
- Assert.assertTrue(CellComparator.equals(searcher.getCurrent(), d.get(5)));
+ Assert.assertTrue(CellComparator.equals(searcher.current(), d.get(5)));
// test when key falls before first key in block
Cell beforeFirst = new KeyValue(Bytes.toBytes("A"), cf, cq0, ts, v0);
@@ -96,8 +96,8 @@ public class TestRowDataSimple extends BaseTestRowData {
Assert.assertEquals(CellScannerPosition.BEFORE_FIRST, p);
p = searcher.positionAtOrAfter(beforeFirst);
Assert.assertEquals(CellScannerPosition.AFTER, p);
- Assert.assertTrue(CellComparator.equals(searcher.getCurrent(), d.get(0)));
- Assert.assertEquals(d.get(0), searcher.getCurrent());
+ Assert.assertTrue(CellComparator.equals(searcher.current(), d.get(0)));
+ Assert.assertEquals(d.get(0), searcher.current());
// test when key falls after last key in block
Cell afterLast = new KeyValue(Bytes.toBytes("z"), cf, cq0, ts, v0);// must be lower case z
@@ -106,7 +106,7 @@ public class TestRowDataSimple extends BaseTestRowData {
Assert.assertEquals(CellScannerPosition.AFTER_LAST, p);
p = searcher.positionAtOrBefore(afterLast);
Assert.assertEquals(CellScannerPosition.BEFORE, p);
- Assert.assertTrue(CellComparator.equals(searcher.getCurrent(), CollectionUtils.getLast(d)));
+ Assert.assertTrue(CellComparator.equals(searcher.current(), CollectionUtils.getLast(d)));
}
}
diff --git a/hbase-prefix-tree/src/test/java/org/apache/hbase/codec/prefixtree/row/data/TestRowDataTrivial.java b/hbase-prefix-tree/src/test/java/org/apache/hbase/codec/prefixtree/row/data/TestRowDataTrivial.java
index 30e3e58..108d2d7 100644
--- a/hbase-prefix-tree/src/test/java/org/apache/hbase/codec/prefixtree/row/data/TestRowDataTrivial.java
+++ b/hbase-prefix-tree/src/test/java/org/apache/hbase/codec/prefixtree/row/data/TestRowDataTrivial.java
@@ -22,9 +22,9 @@ import java.util.List;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hbase.cell.CellScannerPosition;
import org.apache.hbase.codec.prefixtree.PrefixTreeBlockMeta;
import org.apache.hbase.codec.prefixtree.row.BaseTestRowData;
+import org.apache.hbase.codec.prefixtree.scanner.CellScannerPosition;
import org.apache.hbase.codec.prefixtree.scanner.CellSearcher;
import org.junit.Assert;
@@ -68,6 +68,6 @@ public class TestRowDataTrivial extends BaseTestRowData{
KeyValue afterLast = KeyValue.createFirstOnRow(Bytes.toBytes("zzz"));
CellScannerPosition position = searcher.positionAtOrAfter(afterLast);
Assert.assertEquals(CellScannerPosition.AFTER_LAST, position);
- Assert.assertNull(searcher.getCurrent());
+ Assert.assertNull(searcher.current());
}
}
diff --git a/hbase-protocol/pom.xml b/hbase-protocol/pom.xml
index 38be369..181641b 100644
--- a/hbase-protocol/pom.xml
+++ b/hbase-protocol/pom.xml
@@ -53,11 +53,16 @@
-    <!-- General dependencies -->
-    <dependency>
-      <groupId>com.google.protobuf</groupId>
-      <artifactId>protobuf-java</artifactId>
-    </dependency>
+    <!-- Intra-project dependencies -->
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-common</artifactId>
+    </dependency>
+    <!-- General dependencies -->
+    <dependency>
+      <groupId>com.google.protobuf</groupId>
+      <artifactId>protobuf-java</artifactId>
+    </dependency>
@@ -74,4 +79,4 @@
-</project>
\ No newline at end of file
+</project>
diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java
index 4b090ea..04b094a 100644
--- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java
+++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java
@@ -4905,20 +4905,20 @@ public final class ZooKeeperProtos {
public interface TableLockOrBuilder
extends com.google.protobuf.MessageOrBuilder {
- // required bytes tableName = 1;
+ // optional bytes tableName = 1;
boolean hasTableName();
com.google.protobuf.ByteString getTableName();
- // required .ServerName lockOwner = 2;
+ // optional .ServerName lockOwner = 2;
boolean hasLockOwner();
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getLockOwner();
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getLockOwnerOrBuilder();
- // required int64 threadId = 3;
+ // optional int64 threadId = 3;
boolean hasThreadId();
long getThreadId();
- // required bool isShared = 4;
+ // optional bool isShared = 4;
boolean hasIsShared();
boolean getIsShared();
@@ -4955,7 +4955,7 @@ public final class ZooKeeperProtos {
}
private int bitField0_;
- // required bytes tableName = 1;
+ // optional bytes tableName = 1;
public static final int TABLENAME_FIELD_NUMBER = 1;
private com.google.protobuf.ByteString tableName_;
public boolean hasTableName() {
@@ -4965,7 +4965,7 @@ public final class ZooKeeperProtos {
return tableName_;
}
- // required .ServerName lockOwner = 2;
+ // optional .ServerName lockOwner = 2;
public static final int LOCKOWNER_FIELD_NUMBER = 2;
private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName lockOwner_;
public boolean hasLockOwner() {
@@ -4978,7 +4978,7 @@ public final class ZooKeeperProtos {
return lockOwner_;
}
- // required int64 threadId = 3;
+ // optional int64 threadId = 3;
public static final int THREADID_FIELD_NUMBER = 3;
private long threadId_;
public boolean hasThreadId() {
@@ -4988,7 +4988,7 @@ public final class ZooKeeperProtos {
return threadId_;
}
- // required bool isShared = 4;
+ // optional bool isShared = 4;
public static final int ISSHARED_FIELD_NUMBER = 4;
private boolean isShared_;
public boolean hasIsShared() {
@@ -5042,25 +5042,11 @@ public final class ZooKeeperProtos {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
- if (!hasTableName()) {
- memoizedIsInitialized = 0;
- return false;
- }
- if (!hasLockOwner()) {
- memoizedIsInitialized = 0;
- return false;
- }
- if (!hasThreadId()) {
- memoizedIsInitialized = 0;
- return false;
- }
- if (!hasIsShared()) {
- memoizedIsInitialized = 0;
- return false;
- }
- if (!getLockOwner().isInitialized()) {
- memoizedIsInitialized = 0;
- return false;
+ if (hasLockOwner()) {
+ if (!getLockOwner().isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
}
memoizedIsInitialized = 1;
return true;
@@ -5419,25 +5405,11 @@ public final class ZooKeeperProtos {
}
public final boolean isInitialized() {
- if (!hasTableName()) {
-
- return false;
- }
- if (!hasLockOwner()) {
-
- return false;
- }
- if (!hasThreadId()) {
-
- return false;
- }
- if (!hasIsShared()) {
-
- return false;
- }
- if (!getLockOwner().isInitialized()) {
-
- return false;
+ if (hasLockOwner()) {
+ if (!getLockOwner().isInitialized()) {
+
+ return false;
+ }
}
return true;
}
@@ -5500,7 +5472,7 @@ public final class ZooKeeperProtos {
private int bitField0_;
- // required bytes tableName = 1;
+ // optional bytes tableName = 1;
private com.google.protobuf.ByteString tableName_ = com.google.protobuf.ByteString.EMPTY;
public boolean hasTableName() {
return ((bitField0_ & 0x00000001) == 0x00000001);
@@ -5524,7 +5496,7 @@ public final class ZooKeeperProtos {
return this;
}
- // required .ServerName lockOwner = 2;
+ // optional .ServerName lockOwner = 2;
private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName lockOwner_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> lockOwnerBuilder_;
@@ -5614,7 +5586,7 @@ public final class ZooKeeperProtos {
return lockOwnerBuilder_;
}
- // required int64 threadId = 3;
+ // optional int64 threadId = 3;
private long threadId_ ;
public boolean hasThreadId() {
return ((bitField0_ & 0x00000004) == 0x00000004);
@@ -5635,7 +5607,7 @@ public final class ZooKeeperProtos {
return this;
}
- // required bool isShared = 4;
+ // optional bool isShared = 4;
private boolean isShared_ ;
public boolean hasIsShared() {
return ((bitField0_ & 0x00000008) == 0x00000008);
@@ -5786,9 +5758,9 @@ public final class ZooKeeperProtos {
"tate.State\"\"\n\005State\022\013\n\007ENABLED\020\000\022\014\n\010DISA" +
"BLED\020\001\"+\n\027ReplicationHLogPosition\022\020\n\010pos" +
"ition\030\001 \002(\003\"$\n\017ReplicationLock\022\021\n\tlockOw" +
- "ner\030\001 \002(\t\"s\n\tTableLock\022\021\n\ttableName\030\001 \002(",
- "\014\022\036\n\tlockOwner\030\002 \002(\0132\013.ServerName\022\020\n\010thr" +
- "eadId\030\003 \002(\003\022\020\n\010isShared\030\004 \002(\010\022\017\n\007purpose" +
+ "ner\030\001 \002(\t\"s\n\tTableLock\022\021\n\ttableName\030\001 \001(",
+ "\014\022\036\n\tlockOwner\030\002 \001(\0132\013.ServerName\022\020\n\010thr" +
+ "eadId\030\003 \001(\003\022\020\n\010isShared\030\004 \001(\010\022\017\n\007purpose" +
"\030\005 \001(\tBE\n*org.apache.hadoop.hbase.protob" +
"uf.generatedB\017ZooKeeperProtosH\001\210\001\001\240\001\001"
};
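
Flipping TableLock's fields from required to optional (visible in the descriptor strings above, where the field-label bytes change from " \002(" to " \001(") relaxes initialization: isInitialized() now only validates the lockOwner sub-message when one is set. A minimal sketch of a message the old schema rejected at build() but the new one accepts (field value illustrative; the demo class is hypothetical):

    import com.google.protobuf.ByteString;
    import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock;

    public class TableLockOptionalDemo {              // hypothetical demo class
      public static void main(String[] args) {
        // Previously build() threw UninitializedMessageException because
        // tableName, lockOwner, threadId and isShared were all required.
        TableLock lock = TableLock.newBuilder()
            .setTableName(ByteString.copyFromUtf8("t1"))  // illustrative value
            .build();
        System.out.println(lock.isInitialized());     // true under the new schema
        System.out.println(lock.hasThreadId());       // false: unset, simply absent
      }
    }
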
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestColumnSeeking.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestColumnSeeking.java
index db1f7aa..515516e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestColumnSeeking.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestColumnSeeking.java
@@ -35,7 +35,7 @@ import org.apache.hadoop.hbase.*;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hbase.cell.CellComparator;
+import org.apache.hbase.CellComparator;
import org.junit.Test;
import org.junit.experimental.categories.Category;
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
index 5639c6c..0cdd6d1 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
@@ -91,7 +91,7 @@ import org.apache.hadoop.hbase.util.IncrementingEnvironmentEdge;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.util.PairOfSameType;
import org.apache.hadoop.hbase.util.Threads;
-import org.apache.hbase.cell.CellComparator;
+import org.apache.hbase.CellComparator;
import org.junit.Assert;
import org.junit.Test;
import org.junit.experimental.categories.Category;
@@ -225,7 +225,7 @@ public class TestHRegion extends HBaseTestCase {
RegionScanner scanner1 = region.getScanner(scan);
System.out.println("Smallest read point:" + region.getSmallestReadPoint());
-
+
region.compactStores(true);
scanner1.reseek(Bytes.toBytes("r2"));
@@ -254,7 +254,7 @@ public class TestHRegion extends HBaseTestCase {
for (long i = minSeqId; i <= maxSeqId; i += 10) {
Path recoveredEdits = new Path(recoveredEditsDir, String.format("%019d", i));
fs.create(recoveredEdits);
- HLog.Writer writer = HLogFactory.createWriter(fs,
+ HLog.Writer writer = HLogFactory.createWriter(fs,
recoveredEdits, conf);
long time = System.nanoTime();
@@ -306,7 +306,7 @@ public class TestHRegion extends HBaseTestCase {
for (long i = minSeqId; i <= maxSeqId; i += 10) {
Path recoveredEdits = new Path(recoveredEditsDir, String.format("%019d", i));
fs.create(recoveredEdits);
- HLog.Writer writer = HLogFactory.createWriter(fs,
+ HLog.Writer writer = HLogFactory.createWriter(fs,
recoveredEdits, conf);
long time = System.nanoTime();
@@ -367,7 +367,7 @@ public class TestHRegion extends HBaseTestCase {
recoveredEditsDir, String.format("%019d", minSeqId-1));
FSDataOutputStream dos= fs.create(recoveredEdits);
dos.close();
-
+
Map<byte[], Long> maxSeqIdInStores = new TreeMap<byte[], Long>(
Bytes.BYTES_COMPARATOR);
for (Store store : region.getStores().values()) {
@@ -525,7 +525,7 @@ public class TestHRegion extends HBaseTestCase {
this.region = initHRegion(TABLE, getName(), conf, true, Bytes.toBytes("somefamily"));
boolean exceptionCaught = false;
Append append = new Append(Bytes.toBytes("somerow"));
- append.add(Bytes.toBytes("somefamily"), Bytes.toBytes("somequalifier"),
+ append.add(Bytes.toBytes("somefamily"), Bytes.toBytes("somequalifier"),
Bytes.toBytes("somevalue"));
try {
region.append(append, false);
@@ -541,7 +541,7 @@ public class TestHRegion extends HBaseTestCase {
public void testIncrWithReadOnlyTable() throws Exception {
byte[] TABLE = Bytes.toBytes("readOnlyTable");
this.region = initHRegion(TABLE, getName(), conf, true, Bytes.toBytes("somefamily"));
- boolean exceptionCaught = false;
+ boolean exceptionCaught = false;
Increment inc = new Increment(Bytes.toBytes("somerow"));
inc.addColumn(Bytes.toBytes("somefamily"), Bytes.toBytes("somequalifier"), 1L);
try {
@@ -710,7 +710,7 @@ public class TestHRegion extends HBaseTestCase {
LOG.info("...starting put thread while holding lock");
ctx.addThread(putter);
ctx.startThreads();
-
+
LOG.info("...waiting for put thread to sync first time");
long startWait = System.currentTimeMillis();
while (metricsAssertHelper.getCounter("syncTimeNumOps", source) == syncs +2 ) {
@@ -730,7 +730,7 @@ public class TestHRegion extends HBaseTestCase {
assertEquals((i == 5) ? OperationStatusCode.BAD_FAMILY :
OperationStatusCode.SUCCESS, codes[i].getOperationStatusCode());
}
-
+
LOG.info("Nexta, a batch put which uses an already-held lock");
lockedRow = region.obtainRowLock(Bytes.toBytes("row_2"));
LOG.info("...obtained row lock");
@@ -740,7 +740,7 @@ public class TestHRegion extends HBaseTestCase {
if (i == 2) pair.setSecond(lockedRow);
putsAndLocks.add(pair);
}
-
+
codes = region.batchMutate(putsAndLocks.toArray(new Pair[0]));
LOG.info("...performed put");
for (int i = 0; i < 10; i++) {
@@ -749,7 +749,7 @@ public class TestHRegion extends HBaseTestCase {
}
// Make sure we didn't do an extra batch
metricsAssertHelper.assertCounter("syncTimeNumOps", syncs + 5, source);
-
+
// Make sure we still hold lock
assertTrue(region.isRowLocked(lockedRow));
LOG.info("...releasing lock");
@@ -1867,7 +1867,7 @@ public class TestHRegion extends HBaseTestCase {
/**
* This method tests https://issues.apache.org/jira/browse/HBASE-2516.
- * @throws IOException
+ * @throws IOException
*/
public void testGetScanner_WithRegionClosed() throws IOException {
byte[] tableName = Bytes.toBytes("testtable");
@@ -3434,11 +3434,11 @@ public class TestHRegion extends HBaseTestCase {
}
}
}
-
+
/**
* Testcase to check state of region initialization task set to ABORTED or not if any exceptions
* during initialization
- *
+ *
* @throws Exception
*/
@Test
@@ -3602,7 +3602,7 @@ public class TestHRegion extends HBaseTestCase {
Result res = this.region.get(get);
List<KeyValue> kvs = res.getColumn(Incrementer.family,
Incrementer.qualifier);
-
+
//we just got the latest version
assertEquals(kvs.size(), 1);
KeyValue kv = kvs.get(0);
@@ -3696,7 +3696,7 @@ public class TestHRegion extends HBaseTestCase {
Result res = this.region.get(get);
List<KeyValue> kvs = res.getColumn(Appender.family,
Appender.qualifier);
-
+
//we just got the latest version
assertEquals(kvs.size(), 1);
KeyValue kv = kvs.get(0);
@@ -3765,7 +3765,7 @@ public class TestHRegion extends HBaseTestCase {
assertEquals(1, kvs.size());
assertEquals(Bytes.toBytes("value1"), kvs.get(0).getValue());
}
-
+
private void putData(int startRow, int numRows, byte [] qf,
byte [] ...families)
throws IOException {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMultiColumnScanner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMultiColumnScanner.java
index 23d71d3..beecd42 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMultiColumnScanner.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMultiColumnScanner.java
@@ -48,7 +48,7 @@ import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hbase.cell.CellComparator;
+import org.apache.hbase.CellComparator;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.runner.RunWith;
@@ -122,7 +122,7 @@ public class TestMultiColumnScanner {
@Parameters
public static final Collection<Object[]> parameters() {