diff --git a/hbase-common/pom.xml b/hbase-common/pom.xml index 85bb7ad..964cb5d 100644 --- a/hbase-common/pom.xml +++ b/hbase-common/pom.xml @@ -67,6 +67,10 @@ <artifactId>guava</artifactId> </dependency> <dependency> + <groupId>com.google.protobuf</groupId> + <artifactId>protobuf-java</artifactId> + </dependency> + <dependency> <groupId>commons-logging</groupId> <artifactId>commons-logging</artifactId> @@ -77,7 +81,7 @@ <groupId>org.slf4j</groupId> <artifactId>slf4j-log4j12</artifactId> - + diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java index 04bacb4..f6d5c3f 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java @@ -37,7 +37,7 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.ClassSize; import org.apache.hadoop.io.RawComparator; import org.apache.hbase.Cell; -import org.apache.hbase.cell.CellComparator; +import org.apache.hbase.CellComparator; import com.google.common.primitives.Longs; @@ -915,7 +915,7 @@ public class KeyValue implements Cell, HeapSize { return "empty"; } return keyToString(this.bytes, this.offset + ROW_OFFSET, getKeyLength()) + - "/vlen=" + getValueLength() + "/ts=" + memstoreTS; + "/vlen=" + getValueLength() + "/mvcc=" + memstoreTS; } /** @@ -2299,8 +2299,10 @@ public class KeyValue implements Cell, HeapSize { } /** - * @param in Where to read bytes from - * @return KeyValue created by deserializing from in + * @param in Where to read bytes from. Creates a byte array to hold the KeyValue + * backing bytes copied from the stream. + * @return KeyValue created by deserializing from in OR, if we find a length + * of zero, we will return null, which can be useful for marking a stream as done. * @throws IOException */ public static KeyValue create(final DataInput in) throws IOException { @@ -2311,7 +2313,8 @@ * Create a KeyValue reading length from in * @param length * @param in - * @return Created KeyValue + * @return Created KeyValue OR, if we find a length of zero, we will return null, which + * can be useful for marking a stream as done.
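+ * <p>For example, a caller of {@link #create(DataInput)} might read until the
+ * zero-length marker (a hypothetical sketch; the stream name is illustrative):
+ * <pre>
+ * KeyValue kv;
+ * while ((kv = KeyValue.create(in)) != null) {
+ *   // process kv
+ * }
+ * </pre>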
* @throws IOException */ public static KeyValue create(int length, final DataInput in) throws IOException { diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueTestUtil.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueTestUtil.java index c2052b0..bb8923c 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueTestUtil.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueTestUtil.java @@ -26,7 +26,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.IterableUtils; import org.apache.hadoop.hbase.util.Strings; -import org.apache.hbase.cell.CellComparator; +import org.apache.hbase.CellComparator; import com.google.common.collect.Lists; diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueTool.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueTool.java index e6297e2..9a5dca5 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueTool.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueTool.java @@ -21,14 +21,13 @@ package org.apache.hadoop.hbase; import java.nio.ByteBuffer; import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.hbase.util.ByteBufferUtils; import org.apache.hadoop.hbase.util.ByteRange; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.IterableUtils; import org.apache.hadoop.io.WritableUtils; import org.apache.hbase.Cell; -import org.apache.hbase.cell.CellTool; +import org.apache.hbase.CellTool; /** * static convenience methods for dealing with KeyValues and collections of KeyValues diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DataBlockEncoder.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DataBlockEncoder.java index b2ce35a..07b85d3 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DataBlockEncoder.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DataBlockEncoder.java @@ -31,7 +31,6 @@ import org.apache.hadoop.io.RawComparator; *
 * <ul>
 * <li>the KeyValues are stored sorted by key</li>
 * <li>we know the structure of KeyValue</li>
 * <li>the values are always iterated forward from beginning of block</li>
- * <li>knowledge of Key Value format</li>
 * </ul>
  • * * It is designed to work fast enough to be feasible as in memory compression. * diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/test/LoadTestKVGenerator.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/test/LoadTestKVGenerator.java index 5a64a1b..6a30ca3 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/test/LoadTestKVGenerator.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/test/LoadTestKVGenerator.java @@ -16,7 +16,6 @@ */ package org.apache.hadoop.hbase.util.test; -import java.util.Map; import java.util.Random; import org.apache.hadoop.hbase.util.Bytes; diff --git a/hbase-common/src/main/java/org/apache/hbase/Cell.java b/hbase-common/src/main/java/org/apache/hbase/Cell.java index 044b287..532f8ae 100644 --- a/hbase-common/src/main/java/org/apache/hbase/Cell.java +++ b/hbase-common/src/main/java/org/apache/hbase/Cell.java @@ -20,7 +20,6 @@ package org.apache.hbase; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hbase.cell.CellTool; /** diff --git a/hbase-common/src/main/java/org/apache/hbase/CellComparator.java b/hbase-common/src/main/java/org/apache/hbase/CellComparator.java new file mode 100644 index 0000000..f6a3c25 --- /dev/null +++ b/hbase-common/src/main/java/org/apache/hbase/CellComparator.java @@ -0,0 +1,177 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hbase; + +import java.io.Serializable; +import java.util.Comparator; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hbase.util.Bytes; + +import com.google.common.primitives.Longs; + +/** + * Compare two traditional HBase cells. + * + * Note: This comparator is not valid for -ROOT- and .META. tables. 
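+ * <p>Cells sort by row, then family, then qualifier, then timestamp (descending, so a
+ * later timestamp sorts first), then type, then mvcc version (descending). For example,
+ * two cells identical except for their timestamps order newest-first.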
+ */ +@InterfaceAudience.Private +@InterfaceStability.Evolving +public class CellComparator implements Comparator<Cell>, Serializable { + private static final long serialVersionUID = -8760041766259623329L; + + @Override + public int compare(Cell a, Cell b) { + return compareStatic(a, b); + } + + + public static int compareStatic(Cell a, Cell b) { + //row + int c = Bytes.compareTo( + a.getRowArray(), a.getRowOffset(), a.getRowLength(), + b.getRowArray(), b.getRowOffset(), b.getRowLength()); + if (c != 0) return c; + + //family + c = Bytes.compareTo( + a.getFamilyArray(), a.getFamilyOffset(), a.getFamilyLength(), + b.getFamilyArray(), b.getFamilyOffset(), b.getFamilyLength()); + if (c != 0) return c; + + //qualifier + c = Bytes.compareTo( + a.getQualifierArray(), a.getQualifierOffset(), a.getQualifierLength(), + b.getQualifierArray(), b.getQualifierOffset(), b.getQualifierLength()); + if (c != 0) return c; + + //timestamp: later sorts first + c = -Longs.compare(a.getTimestamp(), b.getTimestamp()); + if (c != 0) return c; + + //type + c = (0xff & a.getTypeByte()) - (0xff & b.getTypeByte()); + if (c != 0) return c; + + //mvccVersion: later sorts first + return -Longs.compare(a.getMvccVersion(), b.getMvccVersion()); + } + + + /**************** equals ****************************/ + + public static boolean equals(Cell a, Cell b){ + if (!areKeyLengthsEqual(a, b)) { + return false; + } + //TODO compare byte[]'s in reverse since later bytes more likely to differ + return 0 == compareStatic(a, b); + } + + public static boolean equalsRow(Cell a, Cell b){ + if(!areRowLengthsEqual(a, b)){ + return false; + } + return 0 == Bytes.compareTo( + a.getRowArray(), a.getRowOffset(), a.getRowLength(), + b.getRowArray(), b.getRowOffset(), b.getRowLength()); + } + + + /********************* hashCode ************************/ + + /** + * Returns a hash code that is always the same for two Cells having a matching equals(..) result. + * Currently does not guard against nulls, but it could if necessary.
+ */ + public static int hashCode(Cell cell){ + if (cell == null) {// return 0 for empty Cell + return 0; + } + + //pre-calculate the 3 hashes made of byte ranges + int rowHash = Bytes.hashCode(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength()); + int familyHash = Bytes.hashCode(cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength()); + int qualifierHash = Bytes.hashCode(cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength()); + + //combine the 6 sub-hashes + int hash = 31 * rowHash + familyHash; + hash = 31 * hash + qualifierHash; + hash = 31 * hash + (int)cell.getTimestamp(); + hash = 31 * hash + cell.getTypeByte(); + hash = 31 * hash + (int)cell.getMvccVersion(); + return hash; + } + + + /******************** lengths *************************/ + + public static boolean areKeyLengthsEqual(Cell a, Cell b) { + return a.getRowLength() == b.getRowLength() + && a.getFamilyLength() == b.getFamilyLength() + && a.getQualifierLength() == b.getQualifierLength(); + } + + public static boolean areRowLengthsEqual(Cell a, Cell b) { + return a.getRowLength() == b.getRowLength(); + } + + + /***************** special cases ****************************/ + + /** + * special case for KeyValue.equals + */ + private static int compareStaticIgnoreMvccVersion(Cell a, Cell b) { + //row + int c = Bytes.compareTo( + a.getRowArray(), a.getRowOffset(), a.getRowLength(), + b.getRowArray(), b.getRowOffset(), b.getRowLength()); + if (c != 0) return c; + + //family + c = Bytes.compareTo( + a.getFamilyArray(), a.getFamilyOffset(), a.getFamilyLength(), + b.getFamilyArray(), b.getFamilyOffset(), b.getFamilyLength()); + if (c != 0) return c; + + //qualifier + c = Bytes.compareTo( + a.getQualifierArray(), a.getQualifierOffset(), a.getQualifierLength(), + b.getQualifierArray(), b.getQualifierOffset(), b.getQualifierLength()); + if (c != 0) return c; + + //timestamp: later sorts first + c = -Longs.compare(a.getTimestamp(), b.getTimestamp()); + if (c != 0) return c; + + //type + c = (0xff & a.getTypeByte()) - (0xff & b.getTypeByte()); + return c; + } + + /** + * special case for KeyValue.equals + */ + public static boolean equalsIgnoreMvccVersion(Cell a, Cell b){ + return 0 == compareStaticIgnoreMvccVersion(a, b); + } + +} diff --git a/hbase-common/src/main/java/org/apache/hbase/CellScanner.java b/hbase-common/src/main/java/org/apache/hbase/CellScanner.java new file mode 100644 index 0000000..1da8849 --- /dev/null +++ b/hbase-common/src/main/java/org/apache/hbase/CellScanner.java @@ -0,0 +1,68 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hbase; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hbase.Cell; + +/** + * An interface for iterating through a sequence of cells. Similar to Java's Iterator, but without + * the hasNext() or remove() methods. The hasNext() method is problematic because it may require + * actually loading the next object, which in turn requires storing the previous object somewhere. + * + *
<p>
    The core data block decoder should be as fast as possible, so we push the complexity and + * performance expense of concurrently tracking multiple cells to layers above the CellScanner. + *
<p>
+ * The {@link #get()} method will return a reference to a Cell implementation. This reference may + * or may not point to a reusable cell implementation, so users of the CellScanner should not, for + * example, accumulate a List of Cells. All of the references may point to the same object, which + * would be the latest state of the underlying Cell. In short, the Cell is mutable. Use + * {@link #getDeepCopy()} if you want a Cell that holds no references to the scanner's internal + * state (if the data is encoded or compressed, a call to {@link #getDeepCopy()} will cost more + * than a {@link #get()}). + *
<p>
    + * Typical usage: + * + *
<pre>
    + * while (scanner.next()) {
    + *   Cell cell = scanner.get();
    + *   // do something
    + * }
+ * </pre>
+ * <p>
Often used when reading {@link Cell}s written by {@link org.apache.hbase.io.CellOutputStream}. + */ +@InterfaceAudience.Private +@InterfaceStability.Unstable +public interface CellScanner { + /** + * @return the current Cell which may be mutable + */ + Cell get(); + + /** + * @return a deep copy of the current Cell, or null if there is no current Cell + */ + Cell getDeepCopy(); + + /** + * Advance the scanner one cell. + * @return true if the next cell is found and {@link #get()} will return a valid Cell + */ + boolean next(); +} \ No newline at end of file diff --git a/hbase-common/src/main/java/org/apache/hbase/CellTool.java b/hbase-common/src/main/java/org/apache/hbase/CellTool.java new file mode 100644 index 0000000..d05c813 --- /dev/null +++ b/hbase-common/src/main/java/org/apache/hbase/CellTool.java @@ -0,0 +1,123 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hbase; + +import java.nio.ByteBuffer; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hbase.util.ByteRange; +import org.apache.hadoop.hbase.KeyValue; + +@InterfaceAudience.Private +@InterfaceStability.Evolving +public final class CellTool { + + /******************* ByteRange *******************************/ + + public static ByteRange fillRowRange(Cell cell, ByteRange range) { + return range.set(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength()); + } + + public static ByteRange fillFamilyRange(Cell cell, ByteRange range) { + return range.set(cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength()); + } + + public static ByteRange fillQualifierRange(Cell cell, ByteRange range) { + return range.set(cell.getQualifierArray(), cell.getQualifierOffset(), + cell.getQualifierLength()); + } + + + /***************** get individual arrays for tests ************/ + + public static byte[] getRowArray(Cell cell){ + byte[] output = new byte[cell.getRowLength()]; + copyRowTo(cell, output, 0); + return output; + } + + public static byte[] getFamilyArray(Cell cell){ + byte[] output = new byte[cell.getFamilyLength()]; + copyFamilyTo(cell, output, 0); + return output; + } + + public static byte[] getQualifierArray(Cell cell){ + byte[] output = new byte[cell.getQualifierLength()]; + copyQualifierTo(cell, output, 0); + return output; + } + + public static byte[] getValueArray(Cell cell){ + byte[] output = new byte[cell.getValueLength()]; + copyValueTo(cell, output, 0); + return output; + } + + + /******************** copyTo **********************************/ + + public static int copyRowTo(Cell cell, byte[] destination, int destinationOffset) { + System.arraycopy(cell.getRowArray(), cell.getRowOffset(), destination, destinationOffset, +
cell.getRowLength()); + return destinationOffset + cell.getRowLength(); + } + + public static int copyFamilyTo(Cell cell, byte[] destination, int destinationOffset) { + System.arraycopy(cell.getFamilyArray(), cell.getFamilyOffset(), destination, destinationOffset, + cell.getFamilyLength()); + return destinationOffset + cell.getFamilyLength(); + } + + public static int copyQualifierTo(Cell cell, byte[] destination, int destinationOffset) { + System.arraycopy(cell.getQualifierArray(), cell.getQualifierOffset(), destination, + destinationOffset, cell.getQualifierLength()); + return destinationOffset + cell.getQualifierLength(); + } + + public static int copyValueTo(Cell cell, byte[] destination, int destinationOffset) { + System.arraycopy(cell.getValueArray(), cell.getValueOffset(), destination, destinationOffset, + cell.getValueLength()); + return destinationOffset + cell.getValueLength(); + } + + + /********************* misc *************************************/ + + public static byte getRowByte(Cell cell, int index) { + return cell.getRowArray()[cell.getRowOffset() + index]; + } + + public static ByteBuffer getValueBufferShallowCopy(Cell cell) { + ByteBuffer buffer = ByteBuffer.wrap(cell.getValueArray(), cell.getValueOffset(), + cell.getValueLength()); +// buffer.position(buffer.limit());//make it look as if value was appended + return buffer; + } + + public static Cell createCell(final byte [] row, final byte [] family, final byte [] qualifier, + final long timestamp, final byte type, final byte [] value) { + // I need a Cell Factory here. Using KeyValue for now. TODO. + // TODO: Make a new Cell implementation that just carries these + // byte arrays. + return new KeyValue(row, family, qualifier, timestamp, + KeyValue.Type.codeToType(type), value); + } +} \ No newline at end of file diff --git a/hbase-common/src/main/java/org/apache/hbase/cell/CellComparator.java b/hbase-common/src/main/java/org/apache/hbase/cell/CellComparator.java deleted file mode 100644 index 19d6b54..0000000 --- a/hbase-common/src/main/java/org/apache/hbase/cell/CellComparator.java +++ /dev/null @@ -1,178 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hbase.cell; - -import java.io.Serializable; -import java.util.Comparator; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hbase.Cell; - -import com.google.common.primitives.Longs; - -/** - * Compare two traditional HBase cells. - * - * Note: This comparator is not valid for -ROOT- and .META. tables. 
- */ -@InterfaceAudience.Private -@InterfaceStability.Evolving -public class CellComparator implements Comparator, Serializable{ - private static final long serialVersionUID = -8760041766259623329L; - - @Override - public int compare(Cell a, Cell b) { - return compareStatic(a, b); - } - - - public static int compareStatic(Cell a, Cell b) { - //row - int c = Bytes.compareTo( - a.getRowArray(), a.getRowOffset(), a.getRowLength(), - b.getRowArray(), b.getRowOffset(), b.getRowLength()); - if (c != 0) return c; - - //family - c = Bytes.compareTo( - a.getFamilyArray(), a.getFamilyOffset(), a.getFamilyLength(), - b.getFamilyArray(), b.getFamilyOffset(), b.getFamilyLength()); - if (c != 0) return c; - - //qualifier - c = Bytes.compareTo( - a.getQualifierArray(), a.getQualifierOffset(), a.getQualifierLength(), - b.getQualifierArray(), b.getQualifierOffset(), b.getQualifierLength()); - if (c != 0) return c; - - //timestamp: later sorts first - c = -Longs.compare(a.getTimestamp(), b.getTimestamp()); - if (c != 0) return c; - - //type - c = (0xff & a.getTypeByte()) - (0xff & b.getTypeByte()); - if (c != 0) return c; - - //mvccVersion: later sorts first - return -Longs.compare(a.getMvccVersion(), b.getMvccVersion()); - } - - - /**************** equals ****************************/ - - public static boolean equals(Cell a, Cell b){ - if (!areKeyLengthsEqual(a, b)) { - return false; - } - //TODO compare byte[]'s in reverse since later bytes more likely to differ - return 0 == compareStatic(a, b); - } - - public static boolean equalsRow(Cell a, Cell b){ - if(!areRowLengthsEqual(a, b)){ - return false; - } - return 0 == Bytes.compareTo( - a.getRowArray(), a.getRowOffset(), a.getRowLength(), - b.getRowArray(), b.getRowOffset(), b.getRowLength()); - } - - - /********************* hashCode ************************/ - - /** - * Returns a hash code that is always the same for two Cells having a matching equals(..) result. - * Currently does not guard against nulls, but it could if necessary. 
- */ - public static int hashCode(Cell cell){ - if (cell == null) {// return 0 for empty Cell - return 0; - } - - //pre-calculate the 3 hashes made of byte ranges - int rowHash = Bytes.hashCode(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength()); - int familyHash = Bytes.hashCode(cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength()); - int qualifierHash = Bytes.hashCode(cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength()); - - //combine the 6 sub-hashes - int hash = 31 * rowHash + familyHash; - hash = 31 * hash + qualifierHash; - hash = 31 * hash + (int)cell.getTimestamp(); - hash = 31 * hash + cell.getTypeByte(); - hash = 31 * hash + (int)cell.getMvccVersion(); - return hash; - } - - - /******************** lengths *************************/ - - public static boolean areKeyLengthsEqual(Cell a, Cell b) { - return a.getRowLength() == b.getRowLength() - && a.getFamilyLength() == b.getFamilyLength() - && a.getQualifierLength() == b.getQualifierLength(); - } - - public static boolean areRowLengthsEqual(Cell a, Cell b) { - return a.getRowLength() == b.getRowLength(); - } - - - /***************** special cases ****************************/ - - /** - * special case for KeyValue.equals - */ - private static int compareStaticIgnoreMvccVersion(Cell a, Cell b) { - //row - int c = Bytes.compareTo( - a.getRowArray(), a.getRowOffset(), a.getRowLength(), - b.getRowArray(), b.getRowOffset(), b.getRowLength()); - if (c != 0) return c; - - //family - c = Bytes.compareTo( - a.getFamilyArray(), a.getFamilyOffset(), a.getFamilyLength(), - b.getFamilyArray(), b.getFamilyOffset(), b.getFamilyLength()); - if (c != 0) return c; - - //qualifier - c = Bytes.compareTo( - a.getQualifierArray(), a.getQualifierOffset(), a.getQualifierLength(), - b.getQualifierArray(), b.getQualifierOffset(), b.getQualifierLength()); - if (c != 0) return c; - - //timestamp: later sorts first - c = -Longs.compare(a.getTimestamp(), b.getTimestamp()); - if (c != 0) return c; - - //type - c = (0xff & a.getTypeByte()) - (0xff & b.getTypeByte()); - return c; - } - - /** - * special case for KeyValue.equals - */ - public static boolean equalsIgnoreMvccVersion(Cell a, Cell b){ - return 0 == compareStaticIgnoreMvccVersion(a, b); - } - -} diff --git a/hbase-common/src/main/java/org/apache/hbase/cell/CellOutputStream.java b/hbase-common/src/main/java/org/apache/hbase/cell/CellOutputStream.java deleted file mode 100644 index fcaf27e..0000000 --- a/hbase-common/src/main/java/org/apache/hbase/cell/CellOutputStream.java +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hbase.cell; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hbase.Cell; - -/** - * Accepts a stream of Cells and adds them to its internal data structure. This can be used to build - * a block of cells during compactions and flushes, or to build a byte[] to send to the client. This - * could be backed by a List, but more efficient implementations will append results to a - * byte[] to eliminate overhead, and possibly encode the cells further. - */ -@InterfaceAudience.Private -@InterfaceStability.Evolving -public interface CellOutputStream { - - /** - * Implementation must copy the entire state of the Cell. If the appended Cell is modified - * immediately after the append method returns, the modifications must have absolutely no effect - * on the copy of the Cell that was added to the appender. For example, calling someList.add(cell) - * is not correct. - */ - void write(Cell cell); - - /** - * Let the implementation decide what to do. Usually means writing accumulated data into a byte[] - * that can then be read from the implementation to be sent to disk, put in the block cache, or - * sent over the network. - */ - void flush(); - -} diff --git a/hbase-common/src/main/java/org/apache/hbase/cell/CellScannerPosition.java b/hbase-common/src/main/java/org/apache/hbase/cell/CellScannerPosition.java deleted file mode 100644 index eeadf5f..0000000 --- a/hbase-common/src/main/java/org/apache/hbase/cell/CellScannerPosition.java +++ /dev/null @@ -1,68 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hbase.cell; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; - -/** - * An indicator of the state of the scanner after an operation such as nextCell() or positionAt(..). - * For example: - *

    - */ -@InterfaceAudience.Public -@InterfaceStability.Evolving -public enum CellScannerPosition { - - /** - * getCurrentCell() will NOT return a valid cell. Calling nextCell() will advance to the first - * cell. - */ - BEFORE_FIRST, - - /** - * getCurrentCell() will return a valid cell, but it is not the cell requested by positionAt(..), - * rather it is the nearest cell before the requested cell. - */ - BEFORE, - - /** - * getCurrentCell() will return a valid cell, and it is exactly the cell that was requested by - * positionAt(..). - */ - AT, - - /** - * getCurrentCell() will return a valid cell, but it is not the cell requested by positionAt(..), - * rather it is the nearest cell after the requested cell. - */ - AFTER, - - /** - * getCurrentCell() will NOT return a valid cell. Calling nextCell() will have no effect. - */ - AFTER_LAST - -} diff --git a/hbase-common/src/main/java/org/apache/hbase/cell/CellTool.java b/hbase-common/src/main/java/org/apache/hbase/cell/CellTool.java deleted file mode 100644 index 229ca36..0000000 --- a/hbase-common/src/main/java/org/apache/hbase/cell/CellTool.java +++ /dev/null @@ -1,118 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hbase.cell; - -import java.nio.ByteBuffer; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hbase.util.ByteRange; -import org.apache.hbase.Cell; - -@InterfaceAudience.Private -@InterfaceStability.Evolving -public final class CellTool { - - /******************* ByteRange *******************************/ - - public static ByteRange fillRowRange(Cell cell, ByteRange range) { - return range.set(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength()); - } - - public static ByteRange fillFamilyRange(Cell cell, ByteRange range) { - return range.set(cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength()); - } - - public static ByteRange fillQualifierRange(Cell cell, ByteRange range) { - return range.set(cell.getQualifierArray(), cell.getQualifierOffset(), - cell.getQualifierLength()); - } - - - /***************** get individual arrays for tests ************/ - - public static byte[] getRowArray(Cell cell){ - byte[] output = new byte[cell.getRowLength()]; - copyRowTo(cell, output, 0); - return output; - } - - public static byte[] getFamilyArray(Cell cell){ - byte[] output = new byte[cell.getFamilyLength()]; - copyFamilyTo(cell, output, 0); - return output; - } - - public static byte[] getQualifierArray(Cell cell){ - byte[] output = new byte[cell.getQualifierLength()]; - copyQualifierTo(cell, output, 0); - return output; - } - - public static byte[] getValueArray(Cell cell){ - byte[] output = new byte[cell.getValueLength()]; - copyValueTo(cell, output, 0); - return output; - } - - - /******************** copyTo **********************************/ - - public static int copyRowTo(Cell cell, byte[] destination, int destinationOffset) { - System.arraycopy(cell.getRowArray(), cell.getRowOffset(), destination, destinationOffset, - cell.getRowLength()); - return destinationOffset + cell.getRowLength(); - } - - public static int copyFamilyTo(Cell cell, byte[] destination, int destinationOffset) { - System.arraycopy(cell.getFamilyArray(), cell.getFamilyOffset(), destination, destinationOffset, - cell.getFamilyLength()); - return destinationOffset + cell.getFamilyLength(); - } - - public static int copyQualifierTo(Cell cell, byte[] destination, int destinationOffset) { - System.arraycopy(cell.getQualifierArray(), cell.getQualifierOffset(), destination, - destinationOffset, cell.getQualifierLength()); - return destinationOffset + cell.getQualifierLength(); - } - - public static int copyValueTo(Cell cell, byte[] destination, int destinationOffset) { - System.arraycopy(cell.getValueArray(), cell.getValueOffset(), destination, destinationOffset, - cell.getValueLength()); - return destinationOffset + cell.getValueLength(); - } - - - /********************* misc *************************************/ - - public static byte getRowByte(Cell cell, int index) { - return cell.getRowArray()[cell.getRowOffset() + index]; - } - - - /********************** KeyValue (move to KeyValueUtils) *********************/ - - public static ByteBuffer getValueBufferShallowCopy(Cell cell) { - ByteBuffer buffer = ByteBuffer.wrap(cell.getValueArray(), cell.getValueOffset(), - cell.getValueLength()); -// buffer.position(buffer.limit());//make it look as if value was appended - return buffer; - } - -} diff --git a/hbase-common/src/main/java/org/apache/hbase/codec/BasicCellDecoder.java b/hbase-common/src/main/java/org/apache/hbase/codec/BasicCellDecoder.java new file mode 100644 index 
0000000..6cd6efa --- /dev/null +++ b/hbase-common/src/main/java/org/apache/hbase/codec/BasicCellDecoder.java @@ -0,0 +1,82 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hbase.codec; + +import java.io.DataInputStream; +import java.io.IOException; +import java.io.InputStream; + +import org.apache.hbase.Cell; +import org.apache.hbase.CellScanner; +import org.apache.hbase.CellTool; + +/** + * Depends on {@link InputStream#available()}. Stops reading when returns 0. + * @see BasicCellEncoder + */ +public class BasicCellDecoder implements CellScanner { + private final DataInputStream in; + private boolean hasNext = true; + private Cell current = null; + + public BasicCellDecoder(final InputStream in) { + this.in = new DataInputStream(in); + } + + @Override + public boolean next() { + if (!this.hasNext) return this.hasNext; + try { + if (this.in.available() <= 0) { + this.hasNext = false; + return this.hasNext; + } + byte [] row = readByteArray(this.in); + byte [] family = readByteArray(in); + byte [] qualifier = readByteArray(in); + long timestamp = this.in.readLong(); + byte type = this.in.readByte(); + byte [] value = readByteArray(in); + this.current = CellTool.createCell(row, family, qualifier, timestamp, type, value); + } catch (IOException e) { + throw new RuntimeException(e); + } + return this.hasNext; + } + + /** + * @return Byte array read from the stream. + * @throws IOException + */ + private static byte [] readByteArray(final DataInputStream in) throws IOException { + int length = in.readInt(); + byte [] bytes = new byte [length]; + in.readFully(bytes, 0, length); + return bytes; + } + + @Override + public Cell get() { + return this.current; + } + + @Override + public Cell getDeepCopy() { + return get(); + } +} \ No newline at end of file diff --git a/hbase-common/src/main/java/org/apache/hbase/codec/BasicCellEncoder.java b/hbase-common/src/main/java/org/apache/hbase/codec/BasicCellEncoder.java new file mode 100644 index 0000000..426dcee --- /dev/null +++ b/hbase-common/src/main/java/org/apache/hbase/codec/BasicCellEncoder.java @@ -0,0 +1,85 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hbase.codec; + +import java.io.DataOutputStream; +import java.io.IOException; +import java.io.OutputStream; + +import org.apache.hbase.Cell; +import org.apache.hbase.io.CellOutputStream; + +/** + * Cell encoder that just writes out all the individual elements of a Cell. Uses ints delimiting + * all lengths. Profligate. Does not write the mvcc stamp. Use a different encoder if you want + * that in the stream. + */ +public class BasicCellEncoder implements CellOutputStream { + private final DataOutputStream out; + // This encoder is 'done' once flush has been called + private boolean flushed = false; + + public BasicCellEncoder(final OutputStream out) { + this.out = new DataOutputStream(out); + } + + @Override + public void write(Cell cell) throws IOException { + if (this.flushed) throw new CodecException("Flushed; done"); + try { + // Row + write(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength()); + // Column family + write(cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength()); + // Qualifier + write(cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength()); + // Version + this.out.writeLong(cell.getTimestamp()); + // Type + this.out.writeByte(cell.getTypeByte()); + // Value + write(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()); + } catch (IOException e) { + throw new CodecException(e); + } + } + + /** + * Write an int length followed by the array bytes. + * @param bytes + * @param offset + * @param length + * @throws IOException + */ + private void write(final byte [] bytes, final int offset, final int length) + throws IOException { + this.out.writeInt(length); + this.out.write(bytes, offset, length); + } + + @Override + public void flush() throws IOException { + if (this.flushed) return; + this.flushed = true; + try { + this.out.flush(); + } catch (IOException e) { + throw new CodecException(e); + } + } +} \ No newline at end of file diff --git a/hbase-common/src/main/java/org/apache/hbase/codec/CellMessageDecoder.java b/hbase-common/src/main/java/org/apache/hbase/codec/CellMessageDecoder.java new file mode 100644 index 0000000..9ff1465 --- /dev/null +++ b/hbase-common/src/main/java/org/apache/hbase/codec/CellMessageDecoder.java @@ -0,0 +1,69 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ +package org.apache.hbase.codec; + +import java.io.IOException; +import java.io.InputStream; + +import org.apache.hbase.Cell; +import org.apache.hbase.CellScanner; +import org.apache.hbase.CellTool; +import org.apache.hbase.protobuf.generated.CellProtos; + +/** + * Depends on {@link InputStream#available()}. Stops reading when returns 0. + * @see BasicCellEncoder + */ +public class CellMessageDecoder implements CellScanner { + private final InputStream in; + private boolean hasNext = true; + private Cell current = null; + + public CellMessageDecoder(final InputStream in) { + this.in = in; + } + + @Override + public boolean next() { + if (!this.hasNext) return this.hasNext; + try { + if (this.in.available() <= 0) { + this.hasNext = false; + return this.hasNext; + } + CellProtos.Cell pbcell = CellProtos.Cell.parseDelimitedFrom(this.in); + this.current = CellTool.createCell(pbcell.getRow().toByteArray(), + pbcell.getFamily().toByteArray(), pbcell.getQualifier().toByteArray(), + pbcell.getTimestamp(), (byte)pbcell.getCellType().getNumber(), + pbcell.getValue().toByteArray()); + } catch (IOException e) { + throw new RuntimeException(e); + } + return this.hasNext; + } + + @Override + public Cell get() { + return this.current; + } + + @Override + public Cell getDeepCopy() { + return get(); + } +} \ No newline at end of file diff --git a/hbase-common/src/main/java/org/apache/hbase/codec/CellMessageEncoder.java b/hbase-common/src/main/java/org/apache/hbase/codec/CellMessageEncoder.java new file mode 100644 index 0000000..88d1221 --- /dev/null +++ b/hbase-common/src/main/java/org/apache/hbase/codec/CellMessageEncoder.java @@ -0,0 +1,76 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hbase.codec; + +import java.io.IOException; +import java.io.OutputStream; + +import org.apache.hbase.Cell; +import org.apache.hbase.io.CellOutputStream; +import org.apache.hbase.protobuf.generated.CellProtos; + +import com.google.protobuf.ByteString; + +/** + * Cell encoder that just writes out Cell as a pb Cell Message. Does not write the mvcc stamp. + * Use a different encoder if you want that in the stream. + */ +public class CellMessageEncoder implements CellOutputStream { + private final OutputStream out; + // This encoder is 'done' once flush has been called + private boolean flushed = false; + + public CellMessageEncoder(final OutputStream out) { + this.out = out; + } + + @Override + public void write(Cell cell) throws IOException { + if (this.flushed) throw new CodecException("Flushed; done"); + CellProtos.Cell.Builder builder = CellProtos.Cell.newBuilder(); + // This copies bytes from Cell to ByteString. I don't see anyway around the copy. + // ByteString is final. 
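+ // Each field below is copied into the builder; writeDelimitedTo(..) then prefixes the
+ // message with its varint length so CellMessageDecoder can parseDelimitedFrom(..) it.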
+ builder.setRow(ByteString.copyFrom(cell.getRowArray(), cell.getRowOffset(), + cell.getRowLength())); + builder.setFamily(ByteString.copyFrom(cell.getFamilyArray(), cell.getFamilyOffset(), + cell.getFamilyLength())); + builder.setQualifier(ByteString.copyFrom(cell.getQualifierArray(), cell.getQualifierOffset(), + cell.getQualifierLength())); + builder.setTimestamp(cell.getTimestamp()); + builder.setCellType(CellProtos.CellType.valueOf(cell.getTypeByte())); + builder.setValue(ByteString.copyFrom(cell.getValueArray(), cell.getValueOffset(), + cell.getValueLength())); + CellProtos.Cell pbcell = builder.build(); + try { + pbcell.writeDelimitedTo(this.out); + } catch (IOException e) { + throw new CodecException(e); + } + } + + @Override + public void flush() throws IOException { + if (this.flushed) return; + this.flushed = true; + try { + this.out.flush(); + } catch (IOException e) { + throw new CodecException(e); + } + } +} \ No newline at end of file diff --git a/hbase-common/src/main/java/org/apache/hbase/codec/CodecException.java b/hbase-common/src/main/java/org/apache/hbase/codec/CodecException.java new file mode 100644 index 0000000..352e1a6 --- /dev/null +++ b/hbase-common/src/main/java/org/apache/hbase/codec/CodecException.java @@ -0,0 +1,39 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hbase.codec; + +import java.io.IOException; + +public class CodecException extends IOException { + private static final long serialVersionUID = -2850095011686914405L; + + public CodecException() { + } + + public CodecException(String message) { + super(message); + } + + public CodecException(Throwable t) { + super(t); + } + + public CodecException(String message, Throwable t) { + super(message, t); + } +} \ No newline at end of file diff --git a/hbase-common/src/main/java/org/apache/hbase/codec/KeyValueDecoder.java b/hbase-common/src/main/java/org/apache/hbase/codec/KeyValueDecoder.java new file mode 100644 index 0000000..d56a656 --- /dev/null +++ b/hbase-common/src/main/java/org/apache/hbase/codec/KeyValueDecoder.java @@ -0,0 +1,66 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hbase.codec; + +import java.io.DataInputStream; +import java.io.IOException; +import java.io.InputStream; + +import org.apache.hadoop.hbase.KeyValue; +import org.apache.hbase.Cell; +import org.apache.hbase.CellScanner; + +/** + * Depends on {@link InputStream#available()}. + * @see KeyValueEncoder + */ +public class KeyValueDecoder implements CellScanner { + private final DataInputStream in; + private boolean hasNext = true; + private Cell current = null; + + public KeyValueDecoder(final InputStream in) { + this.in = new DataInputStream(in); + } + + @Override + public boolean next() { + if (!this.hasNext) return this.hasNext; + try { + if (this.in.available() <= 0) { + this.hasNext = false; + return this.hasNext; + } + int length = this.in.readInt(); + this.current = KeyValue.create(length, this.in); + } catch (IOException e) { + throw new RuntimeException(e); + } + return this.hasNext; + } + + @Override + public Cell get() { + return this.current; + } + + @Override + public Cell getDeepCopy() { + return get(); + } +} \ No newline at end of file diff --git a/hbase-common/src/main/java/org/apache/hbase/codec/KeyValueEncoder.java b/hbase-common/src/main/java/org/apache/hbase/codec/KeyValueEncoder.java new file mode 100644 index 0000000..092914a --- /dev/null +++ b/hbase-common/src/main/java/org/apache/hbase/codec/KeyValueEncoder.java @@ -0,0 +1,80 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hbase.codec; + +import java.io.DataOutputStream; +import java.io.IOException; +import java.io.OutputStream; + +import org.apache.hadoop.hbase.KeyValue; +import org.apache.hbase.Cell; +import org.apache.hbase.io.CellOutputStream; + +/** + * Encodes by casting Cell to KeyValue and writing out the backing array with a length prefix. + * This is how KVs were serialized in Puts, Deletes and Results pre-0.96. It is what would + * happen if you called the Writable#write KeyValue implementation. This encoder will fail + * if the passed Cell is not an old-school pre-0.96 KeyValue. Does not copy bytes when writing; + * it writes them directly to the passed stream. + * + *
<p>
    If you wrote two KeyValues to this encoder, it would look like this in the stream: + *
<pre>
    + * length-of-KeyValue1 // A java int with the length of KeyValue1 backing array
    + * KeyValue1 backing array filled with a KeyValue serialized in its particular format
    + * length-of-KeyValue2
    + * KeyValue2 backing array
+ * </pre>
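+ * <p>A minimal usage sketch (the output stream and KeyValues here are hypothetical):
+ * <pre>
+ * KeyValueEncoder encoder = new KeyValueEncoder(out);
+ * encoder.write(kv1);
+ * encoder.write(kv2);
+ * encoder.flush(); // finishes the encoder; subsequent writes throw
+ * </pre>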
    + * @see KeyValueDecoder + */ +public class KeyValueEncoder implements CellOutputStream { + // Need to be able to write java types such as int and long so need DataOutput. + // Want to stream too so DataOutputStream. + private final DataOutputStream out; + private boolean flushed = false; + + public KeyValueEncoder(final OutputStream out) { + this.out = new DataOutputStream(out); + } + + @Override + public void write(Cell cell) throws IOException { + if (this.flushed) throw new CodecException("Flushed, done"); + // This is crass and will not work when KV changes + try { + KeyValue.write((KeyValue)cell, this.out); + } catch (IOException e) { + throw new CodecException(e); + } + } + + /** + * Calling flush 'finishes' this encoder. Subsequent calls + * to {@link #write(Cell)} will throw exception. + */ + @Override + public void flush() throws IOException { + if (this.flushed) return; + this.flushed = true; + // Write out an int whose value is zero as end of stream. + try { + this.out.flush(); + } catch (IOException e) { + throw new CodecException(e); + } + } +} \ No newline at end of file diff --git a/hbase-common/src/main/java/org/apache/hbase/io/CellOutputStream.java b/hbase-common/src/main/java/org/apache/hbase/io/CellOutputStream.java new file mode 100644 index 0000000..f832e8d --- /dev/null +++ b/hbase-common/src/main/java/org/apache/hbase/io/CellOutputStream.java @@ -0,0 +1,55 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hbase.io; + +import java.io.IOException; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hbase.Cell; +import org.apache.hbase.CellScanner; + +/** + * Accepts a stream of Cells. This can be used to build a block of cells during compactions + * and flushes, or to build a byte[] to send to the client. This could be backed by a + * List, but more efficient implementations will append results to a + * byte[] to eliminate overhead, and possibly encode the cells further. + *
<p>
    To read Cells, use {@link CellScanner} + * @see CellScanner + */ +@InterfaceAudience.Private +@InterfaceStability.Evolving +public interface CellOutputStream { + /** + * Implementation must copy the entire state of the Cell. If the written Cell is modified + * immediately after the write method returns, the modifications must have absolutely no effect + * on the copy of the Cell that was added in the write. + * @param cell Cell to write out + * @throws IOException + */ + void write(Cell cell) throws IOException; + + /** + * Let the implementation decide what to do. Usually means writing accumulated data into a byte[] + * that can then be read from the implementation to be sent to disk, put in the block cache, or + * sent over the network. + * @throws IOException + */ + void flush() throws IOException; +} \ No newline at end of file diff --git a/hbase-common/src/main/java/org/apache/hbase/protobuf/generated/CellProtos.java b/hbase-common/src/main/java/org/apache/hbase/protobuf/generated/CellProtos.java new file mode 100644 index 0000000..c5c7fb2 --- /dev/null +++ b/hbase-common/src/main/java/org/apache/hbase/protobuf/generated/CellProtos.java @@ -0,0 +1,860 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: cell.proto + +package org.apache.hbase.protobuf.generated; + +public final class CellProtos { + private CellProtos() {} + public static void registerAllExtensions( + com.google.protobuf.ExtensionRegistry registry) { + } + public enum CellType + implements com.google.protobuf.ProtocolMessageEnum { + MINIMUM(0, 0), + PUT(1, 4), + DELETE(2, 8), + DELETE_COLUMN(3, 12), + DELETE_FAMILY(4, 14), + MAXIMUM(5, 255), + ; + + public static final int MINIMUM_VALUE = 0; + public static final int PUT_VALUE = 4; + public static final int DELETE_VALUE = 8; + public static final int DELETE_COLUMN_VALUE = 12; + public static final int DELETE_FAMILY_VALUE = 14; + public static final int MAXIMUM_VALUE = 255; + + + public final int getNumber() { return value; } + + public static CellType valueOf(int value) { + switch (value) { + case 0: return MINIMUM; + case 4: return PUT; + case 8: return DELETE; + case 12: return DELETE_COLUMN; + case 14: return DELETE_FAMILY; + case 255: return MAXIMUM; + default: return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + private static com.google.protobuf.Internal.EnumLiteMap + internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public CellType findValueByNumber(int number) { + return CellType.valueOf(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor + getValueDescriptor() { + return getDescriptor().getValues().get(index); + } + public final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptorForType() { + return getDescriptor(); + } + public static final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptor() { + return org.apache.hbase.protobuf.generated.CellProtos.getDescriptor().getEnumTypes().get(0); + } + + private static final CellType[] VALUES = { + MINIMUM, PUT, DELETE, DELETE_COLUMN, DELETE_FAMILY, MAXIMUM, + }; + + public static CellType valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "EnumValueDescriptor is not for this type."); + } + return VALUES[desc.getIndex()]; + } + + private final int index; + private final int value; + + private CellType(int index, int 
value) { + this.index = index; + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:CellType) + } + + public interface CellOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // optional bytes row = 1; + boolean hasRow(); + com.google.protobuf.ByteString getRow(); + + // optional bytes family = 2; + boolean hasFamily(); + com.google.protobuf.ByteString getFamily(); + + // optional bytes qualifier = 3; + boolean hasQualifier(); + com.google.protobuf.ByteString getQualifier(); + + // optional uint64 timestamp = 4; + boolean hasTimestamp(); + long getTimestamp(); + + // optional .CellType cellType = 5; + boolean hasCellType(); + org.apache.hbase.protobuf.generated.CellProtos.CellType getCellType(); + + // optional bytes value = 6; + boolean hasValue(); + com.google.protobuf.ByteString getValue(); + } + public static final class Cell extends + com.google.protobuf.GeneratedMessage + implements CellOrBuilder { + // Use Cell.newBuilder() to construct. + private Cell(Builder builder) { + super(builder); + } + private Cell(boolean noInit) {} + + private static final Cell defaultInstance; + public static Cell getDefaultInstance() { + return defaultInstance; + } + + public Cell getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hbase.protobuf.generated.CellProtos.internal_static_Cell_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hbase.protobuf.generated.CellProtos.internal_static_Cell_fieldAccessorTable; + } + + private int bitField0_; + // optional bytes row = 1; + public static final int ROW_FIELD_NUMBER = 1; + private com.google.protobuf.ByteString row_; + public boolean hasRow() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public com.google.protobuf.ByteString getRow() { + return row_; + } + + // optional bytes family = 2; + public static final int FAMILY_FIELD_NUMBER = 2; + private com.google.protobuf.ByteString family_; + public boolean hasFamily() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + public com.google.protobuf.ByteString getFamily() { + return family_; + } + + // optional bytes qualifier = 3; + public static final int QUALIFIER_FIELD_NUMBER = 3; + private com.google.protobuf.ByteString qualifier_; + public boolean hasQualifier() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + public com.google.protobuf.ByteString getQualifier() { + return qualifier_; + } + + // optional uint64 timestamp = 4; + public static final int TIMESTAMP_FIELD_NUMBER = 4; + private long timestamp_; + public boolean hasTimestamp() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + public long getTimestamp() { + return timestamp_; + } + + // optional .CellType cellType = 5; + public static final int CELLTYPE_FIELD_NUMBER = 5; + private org.apache.hbase.protobuf.generated.CellProtos.CellType cellType_; + public boolean hasCellType() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + public org.apache.hbase.protobuf.generated.CellProtos.CellType getCellType() { + return cellType_; + } + + // optional bytes value = 6; + public static final int VALUE_FIELD_NUMBER = 6; + private com.google.protobuf.ByteString value_; + public boolean hasValue() { + return ((bitField0_ & 0x00000020) == 0x00000020); + } + public com.google.protobuf.ByteString getValue() { + return value_; + } + + private void initFields() { + row_ = 
com.google.protobuf.ByteString.EMPTY; + family_ = com.google.protobuf.ByteString.EMPTY; + qualifier_ = com.google.protobuf.ByteString.EMPTY; + timestamp_ = 0L; + cellType_ = org.apache.hbase.protobuf.generated.CellProtos.CellType.MINIMUM; + value_ = com.google.protobuf.ByteString.EMPTY; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, row_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeBytes(2, family_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeBytes(3, qualifier_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + output.writeUInt64(4, timestamp_); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + output.writeEnum(5, cellType_.getNumber()); + } + if (((bitField0_ & 0x00000020) == 0x00000020)) { + output.writeBytes(6, value_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, row_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(2, family_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(3, qualifier_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(4, timestamp_); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + size += com.google.protobuf.CodedOutputStream + .computeEnumSize(5, cellType_.getNumber()); + } + if (((bitField0_ & 0x00000020) == 0x00000020)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(6, value_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hbase.protobuf.generated.CellProtos.Cell)) { + return super.equals(obj); + } + org.apache.hbase.protobuf.generated.CellProtos.Cell other = (org.apache.hbase.protobuf.generated.CellProtos.Cell) obj; + + boolean result = true; + result = result && (hasRow() == other.hasRow()); + if (hasRow()) { + result = result && getRow() + .equals(other.getRow()); + } + result = result && (hasFamily() == other.hasFamily()); + if (hasFamily()) { + result = result && getFamily() + .equals(other.getFamily()); + } + result = result && (hasQualifier() == other.hasQualifier()); + if (hasQualifier()) { + result = result && getQualifier() + .equals(other.getQualifier()); + } + result = result && (hasTimestamp() == other.hasTimestamp()); + if (hasTimestamp()) { + result = result && (getTimestamp() + == other.getTimestamp()); + } + result = result && (hasCellType() == 
other.hasCellType()); + if (hasCellType()) { + result = result && + (getCellType() == other.getCellType()); + } + result = result && (hasValue() == other.hasValue()); + if (hasValue()) { + result = result && getValue() + .equals(other.getValue()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + @java.lang.Override + public int hashCode() { + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasRow()) { + hash = (37 * hash) + ROW_FIELD_NUMBER; + hash = (53 * hash) + getRow().hashCode(); + } + if (hasFamily()) { + hash = (37 * hash) + FAMILY_FIELD_NUMBER; + hash = (53 * hash) + getFamily().hashCode(); + } + if (hasQualifier()) { + hash = (37 * hash) + QUALIFIER_FIELD_NUMBER; + hash = (53 * hash) + getQualifier().hashCode(); + } + if (hasTimestamp()) { + hash = (37 * hash) + TIMESTAMP_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getTimestamp()); + } + if (hasCellType()) { + hash = (37 * hash) + CELLTYPE_FIELD_NUMBER; + hash = (53 * hash) + hashEnum(getCellType()); + } + if (hasValue()) { + hash = (37 * hash) + VALUE_FIELD_NUMBER; + hash = (53 * hash) + getValue().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + return hash; + } + + public static org.apache.hbase.protobuf.generated.CellProtos.Cell parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hbase.protobuf.generated.CellProtos.Cell parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hbase.protobuf.generated.CellProtos.Cell parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hbase.protobuf.generated.CellProtos.Cell parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hbase.protobuf.generated.CellProtos.Cell parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hbase.protobuf.generated.CellProtos.Cell parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static org.apache.hbase.protobuf.generated.CellProtos.Cell parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hbase.protobuf.generated.CellProtos.Cell parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hbase.protobuf.generated.CellProtos.Cell parseFrom( + 
com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hbase.protobuf.generated.CellProtos.Cell parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hbase.protobuf.generated.CellProtos.Cell prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hbase.protobuf.generated.CellProtos.CellOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hbase.protobuf.generated.CellProtos.internal_static_Cell_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hbase.protobuf.generated.CellProtos.internal_static_Cell_fieldAccessorTable; + } + + // Construct using org.apache.hbase.protobuf.generated.CellProtos.Cell.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + row_ = com.google.protobuf.ByteString.EMPTY; + bitField0_ = (bitField0_ & ~0x00000001); + family_ = com.google.protobuf.ByteString.EMPTY; + bitField0_ = (bitField0_ & ~0x00000002); + qualifier_ = com.google.protobuf.ByteString.EMPTY; + bitField0_ = (bitField0_ & ~0x00000004); + timestamp_ = 0L; + bitField0_ = (bitField0_ & ~0x00000008); + cellType_ = org.apache.hbase.protobuf.generated.CellProtos.CellType.MINIMUM; + bitField0_ = (bitField0_ & ~0x00000010); + value_ = com.google.protobuf.ByteString.EMPTY; + bitField0_ = (bitField0_ & ~0x00000020); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hbase.protobuf.generated.CellProtos.Cell.getDescriptor(); + } + + public org.apache.hbase.protobuf.generated.CellProtos.Cell getDefaultInstanceForType() { + return org.apache.hbase.protobuf.generated.CellProtos.Cell.getDefaultInstance(); + } + + public org.apache.hbase.protobuf.generated.CellProtos.Cell build() { + org.apache.hbase.protobuf.generated.CellProtos.Cell result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + private org.apache.hbase.protobuf.generated.CellProtos.Cell buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + org.apache.hbase.protobuf.generated.CellProtos.Cell result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException( + 
result).asInvalidProtocolBufferException(); + } + return result; + } + + public org.apache.hbase.protobuf.generated.CellProtos.Cell buildPartial() { + org.apache.hbase.protobuf.generated.CellProtos.Cell result = new org.apache.hbase.protobuf.generated.CellProtos.Cell(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.row_ = row_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.family_ = family_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + result.qualifier_ = qualifier_; + if (((from_bitField0_ & 0x00000008) == 0x00000008)) { + to_bitField0_ |= 0x00000008; + } + result.timestamp_ = timestamp_; + if (((from_bitField0_ & 0x00000010) == 0x00000010)) { + to_bitField0_ |= 0x00000010; + } + result.cellType_ = cellType_; + if (((from_bitField0_ & 0x00000020) == 0x00000020)) { + to_bitField0_ |= 0x00000020; + } + result.value_ = value_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hbase.protobuf.generated.CellProtos.Cell) { + return mergeFrom((org.apache.hbase.protobuf.generated.CellProtos.Cell)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hbase.protobuf.generated.CellProtos.Cell other) { + if (other == org.apache.hbase.protobuf.generated.CellProtos.Cell.getDefaultInstance()) return this; + if (other.hasRow()) { + setRow(other.getRow()); + } + if (other.hasFamily()) { + setFamily(other.getFamily()); + } + if (other.hasQualifier()) { + setQualifier(other.getQualifier()); + } + if (other.hasTimestamp()) { + setTimestamp(other.getTimestamp()); + } + if (other.hasCellType()) { + setCellType(other.getCellType()); + } + if (other.hasValue()) { + setValue(other.getValue()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + row_ = input.readBytes(); + break; + } + case 18: { + bitField0_ |= 0x00000002; + family_ = input.readBytes(); + break; + } + case 26: { + bitField0_ |= 0x00000004; + qualifier_ = input.readBytes(); + break; + } + case 32: { + bitField0_ |= 0x00000008; + timestamp_ = input.readUInt64(); + break; + } + case 40: { + int rawValue = input.readEnum(); + org.apache.hbase.protobuf.generated.CellProtos.CellType value = org.apache.hbase.protobuf.generated.CellProtos.CellType.valueOf(rawValue); + if (value == null) { + unknownFields.mergeVarintField(5, rawValue); + } else { + bitField0_ |= 0x00000010; + cellType_ = value; + } + break; + } + case 50: { + bitField0_ |= 0x00000020; + value_ = input.readBytes(); + break; + } + } + } + } + + 
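+      // Typical construction of a Cell message through this generated builder
+      // (a sketch; the literal values are placeholders):
+      //   CellProtos.Cell cell = CellProtos.Cell.newBuilder()
+      //       .setRow(com.google.protobuf.ByteString.copyFromUtf8("r"))
+      //       .setFamily(com.google.protobuf.ByteString.copyFromUtf8("f"))
+      //       .setQualifier(com.google.protobuf.ByteString.copyFromUtf8("q"))
+      //       .setTimestamp(1L)
+      //       .setCellType(CellProtos.CellType.PUT)
+      //       .setValue(com.google.protobuf.ByteString.copyFromUtf8("v"))
+      //       .build();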
private int bitField0_; + + // optional bytes row = 1; + private com.google.protobuf.ByteString row_ = com.google.protobuf.ByteString.EMPTY; + public boolean hasRow() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public com.google.protobuf.ByteString getRow() { + return row_; + } + public Builder setRow(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + row_ = value; + onChanged(); + return this; + } + public Builder clearRow() { + bitField0_ = (bitField0_ & ~0x00000001); + row_ = getDefaultInstance().getRow(); + onChanged(); + return this; + } + + // optional bytes family = 2; + private com.google.protobuf.ByteString family_ = com.google.protobuf.ByteString.EMPTY; + public boolean hasFamily() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + public com.google.protobuf.ByteString getFamily() { + return family_; + } + public Builder setFamily(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + family_ = value; + onChanged(); + return this; + } + public Builder clearFamily() { + bitField0_ = (bitField0_ & ~0x00000002); + family_ = getDefaultInstance().getFamily(); + onChanged(); + return this; + } + + // optional bytes qualifier = 3; + private com.google.protobuf.ByteString qualifier_ = com.google.protobuf.ByteString.EMPTY; + public boolean hasQualifier() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + public com.google.protobuf.ByteString getQualifier() { + return qualifier_; + } + public Builder setQualifier(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000004; + qualifier_ = value; + onChanged(); + return this; + } + public Builder clearQualifier() { + bitField0_ = (bitField0_ & ~0x00000004); + qualifier_ = getDefaultInstance().getQualifier(); + onChanged(); + return this; + } + + // optional uint64 timestamp = 4; + private long timestamp_ ; + public boolean hasTimestamp() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + public long getTimestamp() { + return timestamp_; + } + public Builder setTimestamp(long value) { + bitField0_ |= 0x00000008; + timestamp_ = value; + onChanged(); + return this; + } + public Builder clearTimestamp() { + bitField0_ = (bitField0_ & ~0x00000008); + timestamp_ = 0L; + onChanged(); + return this; + } + + // optional .CellType cellType = 5; + private org.apache.hbase.protobuf.generated.CellProtos.CellType cellType_ = org.apache.hbase.protobuf.generated.CellProtos.CellType.MINIMUM; + public boolean hasCellType() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + public org.apache.hbase.protobuf.generated.CellProtos.CellType getCellType() { + return cellType_; + } + public Builder setCellType(org.apache.hbase.protobuf.generated.CellProtos.CellType value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000010; + cellType_ = value; + onChanged(); + return this; + } + public Builder clearCellType() { + bitField0_ = (bitField0_ & ~0x00000010); + cellType_ = org.apache.hbase.protobuf.generated.CellProtos.CellType.MINIMUM; + onChanged(); + return this; + } + + // optional bytes value = 6; + private com.google.protobuf.ByteString value_ = com.google.protobuf.ByteString.EMPTY; + public boolean hasValue() { + return ((bitField0_ & 0x00000020) == 0x00000020); + } + public com.google.protobuf.ByteString getValue() { + return value_; + } + public 
Builder setValue(com.google.protobuf.ByteString value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        bitField0_ |= 0x00000020;
+        value_ = value;
+        onChanged();
+        return this;
+      }
+      public Builder clearValue() {
+        bitField0_ = (bitField0_ & ~0x00000020);
+        value_ = getDefaultInstance().getValue();
+        onChanged();
+        return this;
+      }
+
+      // @@protoc_insertion_point(builder_scope:Cell)
+    }
+
+    static {
+      defaultInstance = new Cell(true);
+      defaultInstance.initFields();
+    }
+
+    // @@protoc_insertion_point(class_scope:Cell)
+  }
+
+  private static com.google.protobuf.Descriptors.Descriptor
+    internal_static_Cell_descriptor;
+  private static
+    com.google.protobuf.GeneratedMessage.FieldAccessorTable
+      internal_static_Cell_fieldAccessorTable;
+
+  public static com.google.protobuf.Descriptors.FileDescriptor
+      getDescriptor() {
+    return descriptor;
+  }
+  private static com.google.protobuf.Descriptors.FileDescriptor
+      descriptor;
+  static {
+    java.lang.String[] descriptorData = {
+      "\n\ncell.proto\"u\n\004Cell\022\013\n\003row\030\001 \001(\014\022\016\n\006fam" +
+      "ily\030\002 \001(\014\022\021\n\tqualifier\030\003 \001(\014\022\021\n\ttimestam" +
+      "p\030\004 \001(\004\022\033\n\010cellType\030\005 \001(\0162\t.CellType\022\r\n\005" +
+      "value\030\006 \001(\014*`\n\010CellType\022\013\n\007MINIMUM\020\000\022\007\n\003" +
+      "PUT\020\004\022\n\n\006DELETE\020\010\022\021\n\rDELETE_COLUMN\020\014\022\021\n\r" +
+      "DELETE_FAMILY\020\016\022\014\n\007MAXIMUM\020\377\001B6\n#org.apa" +
+      "che.hbase.protobuf.generatedB\nCellProtos" +
+      "H\001\240\001\001"
+    };
+    com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
+      new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
+        public com.google.protobuf.ExtensionRegistry assignDescriptors(
+            com.google.protobuf.Descriptors.FileDescriptor root) {
+          descriptor = root;
+          internal_static_Cell_descriptor =
+            getDescriptor().getMessageTypes().get(0);
+          internal_static_Cell_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_Cell_descriptor,
+              new java.lang.String[] { "Row", "Family", "Qualifier", "Timestamp", "CellType", "Value", },
+              org.apache.hbase.protobuf.generated.CellProtos.Cell.class,
+              org.apache.hbase.protobuf.generated.CellProtos.Cell.Builder.class);
+          return null;
+        }
+      };
+    com.google.protobuf.Descriptors.FileDescriptor
+      .internalBuildGeneratedFileFrom(descriptorData,
+        new com.google.protobuf.Descriptors.FileDescriptor[] {
+        }, assigner);
+  }
+
+  // @@protoc_insertion_point(outer_class_scope)
+}
diff --git a/hbase-common/src/main/protobuf/README.txt b/hbase-common/src/main/protobuf/README.txt
new file mode 100644
index 0000000..e2730fa
--- /dev/null
+++ b/hbase-common/src/main/protobuf/README.txt
@@ -0,0 +1,28 @@
+These are the protobuf definition files. The produced java classes are generated
+into src/main/java/*/protobuf/generated and then checked in. The reasoning is
+that they change infrequently.
+
+To regenerate the classes after making definition file changes, ensure first that
+the protobuf protoc tool is in your $PATH (you may need to download it and build
+it first; it's part of the protobuf package obtainable from here:
+http://code.google.com/p/protobuf/downloads/list). Then run the following inside
+this maven module's directory -- i.e. at HBASE_CHECKOUT_DIR/hbase-common (you
+should be able to just copy and paste the below into a terminal and hit return
+-- the protoc compiler runs fast):
+
+  UNIX_PROTO_DIR=src/main/protobuf
+  JAVA_DIR=src/main/java/
+  mkdir -p $JAVA_DIR 2> /dev/null
+  if which cygpath 2> /dev/null; then
+    PROTO_DIR=`cygpath --windows $UNIX_PROTO_DIR`
+    JAVA_DIR=`cygpath --windows $JAVA_DIR`
+  else
+    PROTO_DIR=$UNIX_PROTO_DIR
+  fi
+  for PROTO_FILE in $UNIX_PROTO_DIR/*.proto
+  do
+    protoc -I$PROTO_DIR --java_out=$JAVA_DIR $PROTO_FILE
+  done
+
+After you've done the above, check it in (or post a patch on a JIRA with your
+definition file changes and the generated files).
diff --git a/hbase-common/src/main/protobuf/cell.proto b/hbase-common/src/main/protobuf/cell.proto
new file mode 100644
index 0000000..65b9913
--- /dev/null
+++ b/hbase-common/src/main/protobuf/cell.proto
@@ -0,0 +1,51 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// This file contains protocol buffers that are shared throughout HBase
+
+option java_package = "org.apache.hbase.protobuf.generated";
+option java_outer_classname = "CellProtos";
+option java_generate_equals_and_hash = true;
+option optimize_for = SPEED;
+
+/**
+ * The type of the key in a Cell
+ */
+enum CellType {
+  MINIMUM = 0;
+  PUT = 4;
+
+  DELETE = 8;
+  DELETE_COLUMN = 12;
+  DELETE_FAMILY = 14;
+
+  // MAXIMUM is used when searching; you look from maximum on down.
+  MAXIMUM = 255;
+}
+
+/**
+ * Protocol buffer version of Cell.
+ */
+message Cell {
+  optional bytes row = 1;
+  optional bytes family = 2;
+  optional bytes qualifier = 3;
+  optional uint64 timestamp = 4;
+  optional CellType cellType = 5;
+  optional bytes value = 6;
+}
diff --git a/hbase-common/src/test/java/org/apache/hbase/codec/CodecPerformance.java b/hbase-common/src/test/java/org/apache/hbase/codec/CodecPerformance.java
new file mode 100644
index 0000000..eaa1469
--- /dev/null
+++ b/hbase-common/src/test/java/org/apache/hbase/codec/CodecPerformance.java
@@ -0,0 +1,149 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hbase.codec;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hbase.Cell;
+import org.apache.hbase.CellComparator;
+import org.apache.hbase.CellScanner;
+import org.apache.hbase.io.CellOutputStream;
+
+public class CodecPerformance {
+  public static final Log LOG = LogFactory.getLog(CodecPerformance.class);
+
+  static Cell [] getCells(final int howMany) {
+    Cell [] cells = new Cell[howMany];
+    for (int i = 0; i < howMany; i++) {
+      byte [] index = Bytes.toBytes(i);
+      KeyValue kv = new KeyValue(index, Bytes.toBytes("f"), index, index);
+      cells[i] = kv;
+    }
+    return cells;
+  }
+
+  static int getRoughSize(final Cell [] cells) {
+    int size = 0;
+    for (Cell c: cells) {
+      size += c.getRowLength() + c.getFamilyLength() + c.getQualifierLength() + c.getValueLength();
+      size += Bytes.SIZEOF_LONG + Bytes.SIZEOF_BYTE;
+    }
+    return size;
+  }
+
+  static byte [] runEncoderTest(final int index, final int initialBufferSize,
+      final ByteArrayOutputStream baos, final CellOutputStream encoder, final Cell [] cells)
+  throws IOException {
+    long startTime = System.currentTimeMillis();
+    for (int i = 0; i < cells.length; i++) {
+      encoder.write(cells[i]);
+    }
+    encoder.flush();
+    LOG.info("" + index + " encoded count=" + cells.length + " in " +
+      (System.currentTimeMillis() - startTime) + "ms for encoder " + encoder);
+    // Ensure we did not have to grow the backing buffer.
+    assertTrue(baos.size() < initialBufferSize);
+    return baos.toByteArray();
+  }
+
+  static Cell [] runDecoderTest(final int index, final int count, final CellScanner decoder)
+  throws IOException {
+    Cell [] cells = new Cell[count];
+    long startTime = System.currentTimeMillis();
+    int i = 0;
+    for (; decoder.next(); i++) {
+      cells[i] = decoder.getDeepCopy();
+    }
+    LOG.info("" + index + " decoded count=" + cells.length + " in " +
+      (System.currentTimeMillis() - startTime) + "ms for decoder " + decoder);
+    // Ensure we decoded back as many cells as we encoded.
+    assertTrue(i == count);
+    return cells;
+  }
+
+  static void verifyCells(final Cell [] input, final Cell [] output) {
+    assertEquals(input.length, output.length);
+    for (int i = 0; i < input.length; i++) {
+      assertTrue(CellComparator.equals(input[i], output[i]));
+    }
+  }
+
+  public static void main(String[] args) throws IOException {
+    // How many Cells to encode/decode on each cycle.
+    final int count = 100000;
+    // How many times to do an operation; repeat gives hotspot chance to warm up.
+    final int cycles = 30;
+
+    Cell [] cells = CodecPerformance.getCells(count);
+    int size = CodecPerformance.getRoughSize(cells);
+    int initialBufferSize = 2 * size; // Multiply by 2 to ensure we don't have to grow buffer
+
+    // Test KeyValue codec.
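+    // Each of the three codecs below gets the same treatment: encode all cells over
+    // several cycles, decode them back, then verify the round trip. A new codec can
+    // be benchmarked by repeating the pattern with its encoder/decoder pair, e.g.
+    // (SomeEncoder/SomeDecoder being hypothetical CellOutputStream/CellScanner
+    // implementations):
+    //   ByteArrayOutputStream baos = new ByteArrayOutputStream(initialBufferSize);
+    //   bytes = runEncoderTest(i, initialBufferSize, baos, new SomeEncoder(baos), cells);
+    //   cellsDecoded = runDecoderTest(i, count, new SomeDecoder(new ByteArrayInputStream(bytes)));
+    //   verifyCells(cells, cellsDecoded);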
+    byte [] bytes = null;
+    Cell [] cellsDecoded = null;
+
+    for (int i = 0; i < cycles; i++) {
+      ByteArrayOutputStream baos = new ByteArrayOutputStream(initialBufferSize);
+      KeyValueEncoder encoder = new KeyValueEncoder(baos);
+      bytes = runEncoderTest(i, initialBufferSize, baos, encoder, cells);
+    }
+    for (int i = 0; i < cycles; i++) {
+      ByteArrayInputStream bais = new ByteArrayInputStream(bytes);
+      KeyValueDecoder decoder = new KeyValueDecoder(bais);
+      cellsDecoded = CodecPerformance.runDecoderTest(i, count, decoder);
+    }
+    verifyCells(cells, cellsDecoded);
+
+    // Test basic cell codec.
+    bytes = null;
+    cellsDecoded = null;
+    for (int i = 0; i < cycles; i++) {
+      ByteArrayOutputStream baos = new ByteArrayOutputStream(initialBufferSize);
+      BasicCellEncoder encoder = new BasicCellEncoder(baos);
+      bytes = runEncoderTest(i, initialBufferSize, baos, encoder, cells);
+    }
+    for (int i = 0; i < cycles; i++) {
+      ByteArrayInputStream bais = new ByteArrayInputStream(bytes);
+      BasicCellDecoder decoder = new BasicCellDecoder(bais);
+      cellsDecoded = CodecPerformance.runDecoderTest(i, count, decoder);
+    }
+    verifyCells(cells, cellsDecoded);
+
+    // Test cell message codec.
+    bytes = null;
+    cellsDecoded = null;
+    for (int i = 0; i < cycles; i++) {
+      ByteArrayOutputStream baos = new ByteArrayOutputStream(initialBufferSize);
+      CellMessageEncoder encoder = new CellMessageEncoder(baos);
+      bytes = runEncoderTest(i, initialBufferSize, baos, encoder, cells);
+    }
+    for (int i = 0; i < cycles; i++) {
+      ByteArrayInputStream bais = new ByteArrayInputStream(bytes);
+      CellMessageDecoder decoder = new CellMessageDecoder(bais);
+      cellsDecoded = CodecPerformance.runDecoderTest(i, count, decoder);
+    }
+    verifyCells(cells, cellsDecoded);
+  }
+}
\ No newline at end of file
diff --git a/hbase-common/src/test/java/org/apache/hbase/codec/TestBasicCellCodec.java b/hbase-common/src/test/java/org/apache/hbase/codec/TestBasicCellCodec.java
new file mode 100644
index 0000000..49bc5ee
--- /dev/null
+++ b/hbase-common/src/test/java/org/apache/hbase/codec/TestBasicCellCodec.java
@@ -0,0 +1,110 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ +package org.apache.hbase.codec; + +import static org.junit.Assert.*; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.DataInputStream; +import java.io.DataOutputStream; +import java.io.IOException; + +import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hbase.Cell; +import org.apache.hbase.CellComparator; +import org.apache.hbase.codec.BasicCellDecoder; +import org.apache.hbase.codec.BasicCellEncoder; +import org.junit.Test; + +import com.google.common.io.CountingInputStream; +import com.google.common.io.CountingOutputStream; + +public class TestBasicCellCodec { + + @Test + public void testEmptyWorks() throws IOException { + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + CountingOutputStream cos = new CountingOutputStream(baos); + DataOutputStream dos = new DataOutputStream(cos); + BasicCellEncoder encoder = new BasicCellEncoder(dos); + encoder.flush(); + dos.close(); + long offset = cos.getCount(); + assertEquals(0, offset); + CountingInputStream cis = new CountingInputStream(new ByteArrayInputStream(baos.toByteArray())); + DataInputStream dis = new DataInputStream(cis); + BasicCellDecoder decoder = new BasicCellDecoder(dis); + assertFalse(decoder.next()); + dis.close(); + assertEquals(0, cis.getCount()); + } + + @Test + public void testOne() throws IOException { + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + CountingOutputStream cos = new CountingOutputStream(baos); + DataOutputStream dos = new DataOutputStream(cos); + BasicCellEncoder encoder = new BasicCellEncoder(dos); + final KeyValue kv = new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v")); + encoder.write(kv); + encoder.flush(); + dos.close(); + long offset = cos.getCount(); + CountingInputStream cis = new CountingInputStream(new ByteArrayInputStream(baos.toByteArray())); + DataInputStream dis = new DataInputStream(cis); + BasicCellDecoder decoder = new BasicCellDecoder(dis); + assertTrue(decoder.next()); // First read should pull in the KV + assertFalse(decoder.next()); // Second read should trip over the end-of-stream marker and return false + dis.close(); + assertEquals(offset, cis.getCount()); + } + + @Test + public void testThree() throws IOException { + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + CountingOutputStream cos = new CountingOutputStream(baos); + DataOutputStream dos = new DataOutputStream(cos); + BasicCellEncoder encoder = new BasicCellEncoder(dos); + final KeyValue kv1 = new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"), Bytes.toBytes("1"), Bytes.toBytes("1")); + final KeyValue kv2 = new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"), Bytes.toBytes("2"), Bytes.toBytes("2")); + final KeyValue kv3 = new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"), Bytes.toBytes("3"), Bytes.toBytes("3")); + encoder.write(kv1); + encoder.write(kv2); + encoder.write(kv3); + encoder.flush(); + dos.close(); + long offset = cos.getCount(); + CountingInputStream cis = new CountingInputStream(new ByteArrayInputStream(baos.toByteArray())); + DataInputStream dis = new DataInputStream(cis); + BasicCellDecoder decoder = new BasicCellDecoder(dis); + assertTrue(decoder.next()); + Cell c = decoder.get(); + assertTrue(CellComparator.equals(c, kv1)); + assertTrue(decoder.next()); + c = decoder.get(); + assertTrue(CellComparator.equals(c, kv2)); + assertTrue(decoder.next()); + c = decoder.get(); + assertTrue(CellComparator.equals(c, kv3)); + 
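+    // The stream should now be exhausted: the next call must return false, and the
+    // bytes read back must equal the bytes the encoder wrote.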
assertFalse(decoder.next()); + dis.close(); + assertEquals(offset, cis.getCount()); + } +} \ No newline at end of file diff --git a/hbase-common/src/test/java/org/apache/hbase/codec/TestCellMessageCodec.java b/hbase-common/src/test/java/org/apache/hbase/codec/TestCellMessageCodec.java new file mode 100644 index 0000000..87928a3 --- /dev/null +++ b/hbase-common/src/test/java/org/apache/hbase/codec/TestCellMessageCodec.java @@ -0,0 +1,114 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hbase.codec; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.DataInputStream; +import java.io.DataOutputStream; +import java.io.IOException; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hbase.Cell; +import org.apache.hbase.CellComparator; +import org.junit.Test; + +import com.google.common.io.CountingInputStream; +import com.google.common.io.CountingOutputStream; + +public class TestCellMessageCodec { + public static final Log LOG = LogFactory.getLog(TestCellMessageCodec.class); + + @Test + public void testEmptyWorks() throws IOException { + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + CountingOutputStream cos = new CountingOutputStream(baos); + DataOutputStream dos = new DataOutputStream(cos); + CellMessageEncoder encoder = new CellMessageEncoder(dos); + encoder.flush(); + dos.close(); + long offset = cos.getCount(); + assertEquals(0, offset); + CountingInputStream cis = new CountingInputStream(new ByteArrayInputStream(baos.toByteArray())); + DataInputStream dis = new DataInputStream(cis); + CellMessageDecoder decoder = new CellMessageDecoder(dis); + assertFalse(decoder.next()); + dis.close(); + assertEquals(0, cis.getCount()); + } + + @Test + public void testOne() throws IOException { + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + CountingOutputStream cos = new CountingOutputStream(baos); + DataOutputStream dos = new DataOutputStream(cos); + CellMessageEncoder encoder = new CellMessageEncoder(dos); + final KeyValue kv = + new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v")); + encoder.write(kv); + encoder.flush(); + dos.close(); + long offset = cos.getCount(); + CountingInputStream cis = new CountingInputStream(new ByteArrayInputStream(baos.toByteArray())); + DataInputStream dis = new DataInputStream(cis); + CellMessageDecoder decoder = new CellMessageDecoder(dis); + assertTrue(decoder.next()); // First read should pull in the KV + 
assertFalse(decoder.next()); // Second read should trip over the end-of-stream marker and return false + dis.close(); + assertEquals(offset, cis.getCount()); + } + + @Test + public void testThree() throws IOException { + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + CountingOutputStream cos = new CountingOutputStream(baos); + DataOutputStream dos = new DataOutputStream(cos); + CellMessageEncoder encoder = new CellMessageEncoder(dos); + final KeyValue kv1 = new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"), Bytes.toBytes("1"), Bytes.toBytes("1")); + final KeyValue kv2 = new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"), Bytes.toBytes("2"), Bytes.toBytes("2")); + final KeyValue kv3 = new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"), Bytes.toBytes("3"), Bytes.toBytes("3")); + encoder.write(kv1); + encoder.write(kv2); + encoder.write(kv3); + encoder.flush(); + dos.close(); + long offset = cos.getCount(); + CountingInputStream cis = new CountingInputStream(new ByteArrayInputStream(baos.toByteArray())); + DataInputStream dis = new DataInputStream(cis); + CellMessageDecoder decoder = new CellMessageDecoder(dis); + assertTrue(decoder.next()); + Cell c = decoder.get(); + assertTrue(CellComparator.equals(c, kv1)); + assertTrue(decoder.next()); + c = decoder.get(); + assertTrue(CellComparator.equals(c, kv2)); + assertTrue(decoder.next()); + c = decoder.get(); + assertTrue(CellComparator.equals(c, kv3)); + assertFalse(decoder.next()); + dis.close(); + assertEquals(offset, cis.getCount()); + } +} \ No newline at end of file diff --git a/hbase-common/src/test/java/org/apache/hbase/codec/TestKeyValueCodec.java b/hbase-common/src/test/java/org/apache/hbase/codec/TestKeyValueCodec.java new file mode 100644 index 0000000..92a710a --- /dev/null +++ b/hbase-common/src/test/java/org/apache/hbase/codec/TestKeyValueCodec.java @@ -0,0 +1,112 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hbase.codec; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.DataInputStream; +import java.io.DataOutputStream; +import java.io.IOException; + +import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hbase.Cell; +import org.junit.Test; + +import com.google.common.io.CountingInputStream; +import com.google.common.io.CountingOutputStream; + +public class TestKeyValueCodec { + @Test + public void testEmptyWorks() throws IOException { + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + CountingOutputStream cos = new CountingOutputStream(baos); + DataOutputStream dos = new DataOutputStream(cos); + KeyValueEncoder encoder = new KeyValueEncoder(dos); + encoder.flush(); + dos.close(); + long offset = cos.getCount(); + assertEquals(0, offset); + CountingInputStream cis = new CountingInputStream(new ByteArrayInputStream(baos.toByteArray())); + DataInputStream dis = new DataInputStream(cis); + KeyValueDecoder decoder = new KeyValueDecoder(dis); + assertFalse(decoder.next()); + dis.close(); + assertEquals(0, cis.getCount()); + } + + @Test + public void testOne() throws IOException { + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + CountingOutputStream cos = new CountingOutputStream(baos); + DataOutputStream dos = new DataOutputStream(cos); + KeyValueEncoder encoder = new KeyValueEncoder(dos); + final KeyValue kv = new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v")); + final long length = kv.getLength() + Bytes.SIZEOF_INT; + encoder.write(kv); + encoder.flush(); + dos.close(); + long offset = cos.getCount(); + assertEquals(length, offset); + CountingInputStream cis = new CountingInputStream(new ByteArrayInputStream(baos.toByteArray())); + DataInputStream dis = new DataInputStream(cis); + KeyValueDecoder decoder = new KeyValueDecoder(dis); + assertTrue(decoder.next()); // First read should pull in the KV + assertFalse(decoder.next()); // Second read should trip over the end-of-stream marker and return false + dis.close(); + assertEquals(length, cis.getCount()); + } + + @Test + public void testThree() throws IOException { + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + CountingOutputStream cos = new CountingOutputStream(baos); + DataOutputStream dos = new DataOutputStream(cos); + KeyValueEncoder encoder = new KeyValueEncoder(dos); + final KeyValue kv1 = new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"), Bytes.toBytes("1"), Bytes.toBytes("1")); + final KeyValue kv2 = new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"), Bytes.toBytes("2"), Bytes.toBytes("2")); + final KeyValue kv3 = new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"), Bytes.toBytes("3"), Bytes.toBytes("3")); + final long length = kv1.getLength() + Bytes.SIZEOF_INT; + encoder.write(kv1); + encoder.write(kv2); + encoder.write(kv3); + encoder.flush(); + dos.close(); + long offset = cos.getCount(); + assertEquals(length * 3, offset); + CountingInputStream cis = new CountingInputStream(new ByteArrayInputStream(baos.toByteArray())); + DataInputStream dis = new DataInputStream(cis); + KeyValueDecoder decoder = new KeyValueDecoder(dis); + assertTrue(decoder.next()); + KeyValue kv = (KeyValue)decoder.get(); + assertTrue(kv1.equals(kv)); + assertTrue(decoder.next()); + kv = (KeyValue)decoder.get(); + 
assertTrue(kv2.equals(kv)); + assertTrue(decoder.next()); + kv = (KeyValue)decoder.get(); + assertTrue(kv3.equals(kv)); + assertFalse(decoder.next()); + dis.close(); + assertEquals((length * 3), cis.getCount()); + } +} \ No newline at end of file diff --git a/hbase-protocol/pom.xml b/hbase-protocol/pom.xml index 38be369..181641b 100644 --- a/hbase-protocol/pom.xml +++ b/hbase-protocol/pom.xml @@ -53,11 +53,16 @@ - - - com.google.protobuf - protobuf-java - + + + org.apache.hbase + hbase-common + + + + com.google.protobuf + protobuf-java + @@ -74,4 +79,4 @@ - \ No newline at end of file + diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java index 5aff852..97b5ef8 100644 --- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java +++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java @@ -2048,14 +2048,14 @@ public final class ClientProtos { public interface ResultOrBuilder extends com.google.protobuf.MessageOrBuilder { - // repeated .KeyValue keyValue = 1; - java.util.List + // repeated .Cell keyValue = 1; + java.util.List getKeyValueList(); - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.KeyValue getKeyValue(int index); + org.apache.hbase.protobuf.generated.CellProtos.Cell getKeyValue(int index); int getKeyValueCount(); - java.util.List + java.util.List getKeyValueOrBuilderList(); - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.KeyValueOrBuilder getKeyValueOrBuilder( + org.apache.hbase.protobuf.generated.CellProtos.CellOrBuilder getKeyValueOrBuilder( int index); } public static final class Result extends @@ -2086,23 +2086,23 @@ public final class ClientProtos { return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.internal_static_Result_fieldAccessorTable; } - // repeated .KeyValue keyValue = 1; + // repeated .Cell keyValue = 1; public static final int KEYVALUE_FIELD_NUMBER = 1; - private java.util.List keyValue_; - public java.util.List getKeyValueList() { + private java.util.List keyValue_; + public java.util.List getKeyValueList() { return keyValue_; } - public java.util.List + public java.util.List getKeyValueOrBuilderList() { return keyValue_; } public int getKeyValueCount() { return keyValue_.size(); } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.KeyValue getKeyValue(int index) { + public org.apache.hbase.protobuf.generated.CellProtos.Cell getKeyValue(int index) { return keyValue_.get(index); } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.KeyValueOrBuilder getKeyValueOrBuilder( + public org.apache.hbase.protobuf.generated.CellProtos.CellOrBuilder getKeyValueOrBuilder( int index) { return keyValue_.get(index); } @@ -2115,12 +2115,6 @@ public final class ClientProtos { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; - for (int i = 0; i < getKeyValueCount(); i++) { - if (!getKeyValue(i).isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - } memoizedIsInitialized = 1; return true; } @@ -2397,12 +2391,6 @@ public final class ClientProtos { } public final boolean isInitialized() { - for (int i = 0; i < getKeyValueCount(); i++) { - if (!getKeyValue(i).isInitialized()) { - - return false; - } - } return true; } @@ -2430,7 +2418,7 @@ public final class ClientProtos { break; } case 10: { - 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.KeyValue.Builder subBuilder = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.KeyValue.newBuilder(); + org.apache.hbase.protobuf.generated.CellProtos.Cell.Builder subBuilder = org.apache.hbase.protobuf.generated.CellProtos.Cell.newBuilder(); input.readMessage(subBuilder, extensionRegistry); addKeyValue(subBuilder.buildPartial()); break; @@ -2441,20 +2429,20 @@ public final class ClientProtos { private int bitField0_; - // repeated .KeyValue keyValue = 1; - private java.util.List keyValue_ = + // repeated .Cell keyValue = 1; + private java.util.List keyValue_ = java.util.Collections.emptyList(); private void ensureKeyValueIsMutable() { if (!((bitField0_ & 0x00000001) == 0x00000001)) { - keyValue_ = new java.util.ArrayList(keyValue_); + keyValue_ = new java.util.ArrayList(keyValue_); bitField0_ |= 0x00000001; } } private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.KeyValue, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.KeyValue.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.KeyValueOrBuilder> keyValueBuilder_; + org.apache.hbase.protobuf.generated.CellProtos.Cell, org.apache.hbase.protobuf.generated.CellProtos.Cell.Builder, org.apache.hbase.protobuf.generated.CellProtos.CellOrBuilder> keyValueBuilder_; - public java.util.List getKeyValueList() { + public java.util.List getKeyValueList() { if (keyValueBuilder_ == null) { return java.util.Collections.unmodifiableList(keyValue_); } else { @@ -2468,7 +2456,7 @@ public final class ClientProtos { return keyValueBuilder_.getCount(); } } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.KeyValue getKeyValue(int index) { + public org.apache.hbase.protobuf.generated.CellProtos.Cell getKeyValue(int index) { if (keyValueBuilder_ == null) { return keyValue_.get(index); } else { @@ -2476,7 +2464,7 @@ public final class ClientProtos { } } public Builder setKeyValue( - int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.KeyValue value) { + int index, org.apache.hbase.protobuf.generated.CellProtos.Cell value) { if (keyValueBuilder_ == null) { if (value == null) { throw new NullPointerException(); @@ -2490,7 +2478,7 @@ public final class ClientProtos { return this; } public Builder setKeyValue( - int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.KeyValue.Builder builderForValue) { + int index, org.apache.hbase.protobuf.generated.CellProtos.Cell.Builder builderForValue) { if (keyValueBuilder_ == null) { ensureKeyValueIsMutable(); keyValue_.set(index, builderForValue.build()); @@ -2500,7 +2488,7 @@ public final class ClientProtos { } return this; } - public Builder addKeyValue(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.KeyValue value) { + public Builder addKeyValue(org.apache.hbase.protobuf.generated.CellProtos.Cell value) { if (keyValueBuilder_ == null) { if (value == null) { throw new NullPointerException(); @@ -2514,7 +2502,7 @@ public final class ClientProtos { return this; } public Builder addKeyValue( - int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.KeyValue value) { + int index, org.apache.hbase.protobuf.generated.CellProtos.Cell value) { if (keyValueBuilder_ == null) { if (value == null) { throw new NullPointerException(); @@ -2528,7 +2516,7 @@ public final class ClientProtos { return this; } public Builder addKeyValue( - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.KeyValue.Builder builderForValue) { + 
org.apache.hbase.protobuf.generated.CellProtos.Cell.Builder builderForValue) { if (keyValueBuilder_ == null) { ensureKeyValueIsMutable(); keyValue_.add(builderForValue.build()); @@ -2539,7 +2527,7 @@ public final class ClientProtos { return this; } public Builder addKeyValue( - int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.KeyValue.Builder builderForValue) { + int index, org.apache.hbase.protobuf.generated.CellProtos.Cell.Builder builderForValue) { if (keyValueBuilder_ == null) { ensureKeyValueIsMutable(); keyValue_.add(index, builderForValue.build()); @@ -2550,7 +2538,7 @@ public final class ClientProtos { return this; } public Builder addAllKeyValue( - java.lang.Iterable values) { + java.lang.Iterable values) { if (keyValueBuilder_ == null) { ensureKeyValueIsMutable(); super.addAll(values, keyValue_); @@ -2580,18 +2568,18 @@ public final class ClientProtos { } return this; } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.KeyValue.Builder getKeyValueBuilder( + public org.apache.hbase.protobuf.generated.CellProtos.Cell.Builder getKeyValueBuilder( int index) { return getKeyValueFieldBuilder().getBuilder(index); } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.KeyValueOrBuilder getKeyValueOrBuilder( + public org.apache.hbase.protobuf.generated.CellProtos.CellOrBuilder getKeyValueOrBuilder( int index) { if (keyValueBuilder_ == null) { return keyValue_.get(index); } else { return keyValueBuilder_.getMessageOrBuilder(index); } } - public java.util.List + public java.util.List getKeyValueOrBuilderList() { if (keyValueBuilder_ != null) { return keyValueBuilder_.getMessageOrBuilderList(); @@ -2599,25 +2587,25 @@ public final class ClientProtos { return java.util.Collections.unmodifiableList(keyValue_); } } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.KeyValue.Builder addKeyValueBuilder() { + public org.apache.hbase.protobuf.generated.CellProtos.Cell.Builder addKeyValueBuilder() { return getKeyValueFieldBuilder().addBuilder( - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.KeyValue.getDefaultInstance()); + org.apache.hbase.protobuf.generated.CellProtos.Cell.getDefaultInstance()); } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.KeyValue.Builder addKeyValueBuilder( + public org.apache.hbase.protobuf.generated.CellProtos.Cell.Builder addKeyValueBuilder( int index) { return getKeyValueFieldBuilder().addBuilder( - index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.KeyValue.getDefaultInstance()); + index, org.apache.hbase.protobuf.generated.CellProtos.Cell.getDefaultInstance()); } - public java.util.List + public java.util.List getKeyValueBuilderList() { return getKeyValueFieldBuilder().getBuilderList(); } private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.KeyValue, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.KeyValue.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.KeyValueOrBuilder> + org.apache.hbase.protobuf.generated.CellProtos.Cell, org.apache.hbase.protobuf.generated.CellProtos.Cell.Builder, org.apache.hbase.protobuf.generated.CellProtos.CellOrBuilder> getKeyValueFieldBuilder() { if (keyValueBuilder_ == null) { keyValueBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.KeyValue, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.KeyValue.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.KeyValueOrBuilder>( + 
org.apache.hbase.protobuf.generated.CellProtos.Cell, org.apache.hbase.protobuf.generated.CellProtos.Cell.Builder, org.apache.hbase.protobuf.generated.CellProtos.CellOrBuilder>( keyValue_, ((bitField0_ & 0x00000001) == 0x00000001), getParentForChildren(), @@ -4380,12 +4368,6 @@ public final class ClientProtos { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; - if (hasResult()) { - if (!getResult().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - } memoizedIsInitialized = 1; return true; } @@ -4668,12 +4650,6 @@ public final class ClientProtos { } public final boolean isInitialized() { - if (hasResult()) { - if (!getResult().isInitialized()) { - - return false; - } - } return true; } @@ -4932,12 +4908,6 @@ public final class ClientProtos { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; - for (int i = 0; i < getResultCount(); i++) { - if (!getResult(i).isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - } memoizedIsInitialized = 1; return true; } @@ -5246,12 +5216,6 @@ public final class ClientProtos { } public final boolean isInitialized() { - for (int i = 0; i < getResultCount(); i++) { - if (!getResult(i).isInitialized()) { - - return false; - } - } return true; } @@ -9934,12 +9898,6 @@ public final class ClientProtos { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; - if (hasResult()) { - if (!getResult().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - } memoizedIsInitialized = 1; return true; } @@ -10222,12 +10180,6 @@ public final class ClientProtos { } public final boolean isInitialized() { - if (hasResult()) { - if (!getResult().isInitialized()) { - - return false; - } - } return true; } @@ -13227,12 +13179,6 @@ public final class ClientProtos { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; - for (int i = 0; i < getResultCount(); i++) { - if (!getResult(i).isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - } memoizedIsInitialized = 1; return true; } @@ -13611,12 +13557,6 @@ public final class ClientProtos { } public final boolean isInitialized() { - for (int i = 0; i < getResultCount(); i++) { - if (!getResult(i).isInitialized()) { - - return false; - } - } return true; } @@ -18331,12 +18271,6 @@ public final class ClientProtos { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; - if (hasValue()) { - if (!getValue().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - } if (hasException()) { if (!getException().isInitialized()) { memoizedIsInitialized = 0; @@ -18634,12 +18568,6 @@ public final class ClientProtos { } public final boolean isInitialized() { - if (hasValue()) { - if (!getValue().isInitialized()) { - - return false; - } - } if (hasException()) { if (!getException().isInitialized()) { @@ -21060,91 +20988,92 @@ public final class ClientProtos { descriptor; static { java.lang.String[] descriptorData = { - "\n\014Client.proto\032\013hbase.proto\032\020Comparator." 
+ - "proto\"+\n\006Column\022\016\n\006family\030\001 \002(\014\022\021\n\tquali" + - "fier\030\002 \003(\014\"\342\001\n\003Get\022\013\n\003row\030\001 \002(\014\022\027\n\006colum" + - "n\030\002 \003(\0132\007.Column\022!\n\tattribute\030\003 \003(\0132\016.Na" + - "meBytesPair\022\027\n\006filter\030\004 \001(\0132\007.Filter\022\035\n\t" + - "timeRange\030\005 \001(\0132\n.TimeRange\022\026\n\013maxVersio" + - "ns\030\006 \001(\r:\0011\022\031\n\013cacheBlocks\030\007 \001(\010:\004true\022\022" + - "\n\nstoreLimit\030\010 \001(\r\022\023\n\013storeOffset\030\t \001(\r\"" + - "%\n\006Result\022\033\n\010keyValue\030\001 \003(\0132\t.KeyValue\"r" + - "\n\nGetRequest\022 \n\006region\030\001 \002(\0132\020.RegionSpe", - "cifier\022\021\n\003get\030\002 \002(\0132\004.Get\022\030\n\020closestRowB" + - "efore\030\003 \001(\010\022\025\n\rexistenceOnly\030\004 \001(\010\"w\n\017Mu" + - "ltiGetRequest\022 \n\006region\030\001 \002(\0132\020.RegionSp" + - "ecifier\022\021\n\003get\030\002 \003(\0132\004.Get\022\030\n\020closestRow" + - "Before\030\003 \001(\010\022\025\n\rexistenceOnly\030\004 \001(\010\"6\n\013G" + - "etResponse\022\027\n\006result\030\001 \001(\0132\007.Result\022\016\n\006e" + - "xists\030\002 \001(\010\";\n\020MultiGetResponse\022\027\n\006resul" + - "t\030\001 \003(\0132\007.Result\022\016\n\006exists\030\002 \003(\010\"\177\n\tCond" + - "ition\022\013\n\003row\030\001 \002(\014\022\016\n\006family\030\002 \002(\014\022\021\n\tqu" + - "alifier\030\003 \002(\014\022!\n\013compareType\030\004 \002(\0162\014.Com", - "pareType\022\037\n\ncomparator\030\005 \002(\0132\013.Comparato" + - "r\"\266\004\n\006Mutate\022\013\n\003row\030\001 \002(\014\022&\n\nmutateType\030" + - "\002 \002(\0162\022.Mutate.MutateType\022(\n\013columnValue" + - "\030\003 \003(\0132\023.Mutate.ColumnValue\022!\n\tattribute" + - "\030\004 \003(\0132\016.NameBytesPair\022\021\n\ttimestamp\030\005 \001(" + - "\004\022\030\n\nwriteToWAL\030\006 \001(\010:\004true\022\035\n\ttimeRange" + - "\030\n \001(\0132\n.TimeRange\032\310\001\n\013ColumnValue\022\016\n\006fa" + - "mily\030\001 \002(\014\022:\n\016qualifierValue\030\002 \003(\0132\".Mut" + - "ate.ColumnValue.QualifierValue\032m\n\016Qualif" + - "ierValue\022\021\n\tqualifier\030\001 \001(\014\022\r\n\005value\030\002 \001", - "(\014\022\021\n\ttimestamp\030\003 \001(\004\022&\n\ndeleteType\030\004 \001(" + - "\0162\022.Mutate.DeleteType\"<\n\nMutateType\022\n\n\006A" + - "PPEND\020\000\022\r\n\tINCREMENT\020\001\022\007\n\003PUT\020\002\022\n\n\006DELET" + - "E\020\003\"U\n\nDeleteType\022\026\n\022DELETE_ONE_VERSION\020" + - "\000\022\034\n\030DELETE_MULTIPLE_VERSIONS\020\001\022\021\n\rDELET" + - "E_FAMILY\020\002\"i\n\rMutateRequest\022 \n\006region\030\001 " + - "\002(\0132\020.RegionSpecifier\022\027\n\006mutate\030\002 \002(\0132\007." 
+ - "Mutate\022\035\n\tcondition\030\003 \001(\0132\n.Condition\"<\n" + - "\016MutateResponse\022\027\n\006result\030\001 \001(\0132\007.Result" + - "\022\021\n\tprocessed\030\002 \001(\010\"\307\002\n\004Scan\022\027\n\006column\030\001", - " \003(\0132\007.Column\022!\n\tattribute\030\002 \003(\0132\016.NameB" + - "ytesPair\022\020\n\010startRow\030\003 \001(\014\022\017\n\007stopRow\030\004 " + - "\001(\014\022\027\n\006filter\030\005 \001(\0132\007.Filter\022\035\n\ttimeRang" + - "e\030\006 \001(\0132\n.TimeRange\022\026\n\013maxVersions\030\007 \001(\r" + - ":\0011\022\031\n\013cacheBlocks\030\010 \001(\010:\004true\022\021\n\tbatchS" + - "ize\030\t \001(\r\022\025\n\rmaxResultSize\030\n \001(\004\022\022\n\nstor" + - "eLimit\030\013 \001(\r\022\023\n\013storeOffset\030\014 \001(\r\022\"\n\032loa" + - "dColumnFamiliesOnDemand\030\r \001(\010\"\230\001\n\013ScanRe" + - "quest\022 \n\006region\030\001 \001(\0132\020.RegionSpecifier\022" + - "\023\n\004scan\030\002 \001(\0132\005.Scan\022\021\n\tscannerId\030\003 \001(\004\022", - "\024\n\014numberOfRows\030\004 \001(\r\022\024\n\014closeScanner\030\005 " + - "\001(\010\022\023\n\013nextCallSeq\030\006 \001(\004\"u\n\014ScanResponse" + - "\022\027\n\006result\030\001 \003(\0132\007.Result\022\021\n\tscannerId\030\002" + - " \001(\004\022\023\n\013moreResults\030\003 \001(\010\022\013\n\003ttl\030\004 \001(\r\022\027" + - "\n\017resultSizeBytes\030\005 \001(\004\"\260\001\n\024BulkLoadHFil" + - "eRequest\022 \n\006region\030\001 \002(\0132\020.RegionSpecifi" + - "er\0224\n\nfamilyPath\030\002 \003(\0132 .BulkLoadHFileRe" + - "quest.FamilyPath\022\024\n\014assignSeqNum\030\003 \001(\010\032*" + - "\n\nFamilyPath\022\016\n\006family\030\001 \002(\014\022\014\n\004path\030\002 \002" + - "(\t\"\'\n\025BulkLoadHFileResponse\022\016\n\006loaded\030\001 ", - "\002(\010\"_\n\026CoprocessorServiceCall\022\013\n\003row\030\001 \002" + - "(\014\022\023\n\013serviceName\030\002 \002(\t\022\022\n\nmethodName\030\003 " + - "\002(\t\022\017\n\007request\030\004 \002(\014\"d\n\031CoprocessorServi" + - "ceRequest\022 \n\006region\030\001 \002(\0132\020.RegionSpecif" + - "ier\022%\n\004call\030\002 \002(\0132\027.CoprocessorServiceCa" + - "ll\"]\n\032CoprocessorServiceResponse\022 \n\006regi" + - "on\030\001 \002(\0132\020.RegionSpecifier\022\035\n\005value\030\002 \002(" + - "\0132\016.NameBytesPair\"9\n\013MultiAction\022\027\n\006muta" + - "te\030\001 \001(\0132\007.Mutate\022\021\n\003get\030\002 \001(\0132\004.Get\"I\n\014" + - "ActionResult\022\026\n\005value\030\001 \001(\0132\007.Result\022!\n\t", - "exception\030\002 \001(\0132\016.NameBytesPair\"^\n\014Multi" + - "Request\022 \n\006region\030\001 \002(\0132\020.RegionSpecifie" + - "r\022\034\n\006action\030\002 \003(\0132\014.MultiAction\022\016\n\006atomi" + - "c\030\003 \001(\010\".\n\rMultiResponse\022\035\n\006result\030\001 \003(\013" + - "2\r.ActionResult2\342\002\n\rClientService\022 \n\003get" + - "\022\013.GetRequest\032\014.GetResponse\022/\n\010multiGet\022" + - "\020.MultiGetRequest\032\021.MultiGetResponse\022)\n\006" + - "mutate\022\016.MutateRequest\032\017.MutateResponse\022" + - "#\n\004scan\022\014.ScanRequest\032\r.ScanResponse\022>\n\r" + - "bulkLoadHFile\022\025.BulkLoadHFileRequest\032\026.B", - "ulkLoadHFileResponse\022F\n\013execService\022\032.Co" + - "processorServiceRequest\032\033.CoprocessorSer" + - "viceResponse\022&\n\005multi\022\r.MultiRequest\032\016.M" + - "ultiResponseBB\n*org.apache.hadoop.hbase." 
+ - "protobuf.generatedB\014ClientProtosH\001\210\001\001\240\001\001" + "\n\014Client.proto\032\nCell.proto\032\013hbase.proto\032" + + "\020Comparator.proto\"+\n\006Column\022\016\n\006family\030\001 " + + "\002(\014\022\021\n\tqualifier\030\002 \003(\014\"\342\001\n\003Get\022\013\n\003row\030\001 " + + "\002(\014\022\027\n\006column\030\002 \003(\0132\007.Column\022!\n\tattribut" + + "e\030\003 \003(\0132\016.NameBytesPair\022\027\n\006filter\030\004 \001(\0132" + + "\007.Filter\022\035\n\ttimeRange\030\005 \001(\0132\n.TimeRange\022" + + "\026\n\013maxVersions\030\006 \001(\r:\0011\022\031\n\013cacheBlocks\030\007" + + " \001(\010:\004true\022\022\n\nstoreLimit\030\010 \001(\r\022\023\n\013storeO" + + "ffset\030\t \001(\r\"!\n\006Result\022\027\n\010keyValue\030\001 \003(\0132" + + "\005.Cell\"r\n\nGetRequest\022 \n\006region\030\001 \002(\0132\020.R", + "egionSpecifier\022\021\n\003get\030\002 \002(\0132\004.Get\022\030\n\020clo" + + "sestRowBefore\030\003 \001(\010\022\025\n\rexistenceOnly\030\004 \001" + + "(\010\"w\n\017MultiGetRequest\022 \n\006region\030\001 \002(\0132\020." + + "RegionSpecifier\022\021\n\003get\030\002 \003(\0132\004.Get\022\030\n\020cl" + + "osestRowBefore\030\003 \001(\010\022\025\n\rexistenceOnly\030\004 " + + "\001(\010\"6\n\013GetResponse\022\027\n\006result\030\001 \001(\0132\007.Res" + + "ult\022\016\n\006exists\030\002 \001(\010\";\n\020MultiGetResponse\022" + + "\027\n\006result\030\001 \003(\0132\007.Result\022\016\n\006exists\030\002 \003(\010" + + "\"\177\n\tCondition\022\013\n\003row\030\001 \002(\014\022\016\n\006family\030\002 \002" + + "(\014\022\021\n\tqualifier\030\003 \002(\014\022!\n\013compareType\030\004 \002", + "(\0162\014.CompareType\022\037\n\ncomparator\030\005 \002(\0132\013.C" + + "omparator\"\266\004\n\006Mutate\022\013\n\003row\030\001 \002(\014\022&\n\nmut" + + "ateType\030\002 \002(\0162\022.Mutate.MutateType\022(\n\013col" + + "umnValue\030\003 \003(\0132\023.Mutate.ColumnValue\022!\n\ta" + + "ttribute\030\004 \003(\0132\016.NameBytesPair\022\021\n\ttimest" + + "amp\030\005 \001(\004\022\030\n\nwriteToWAL\030\006 \001(\010:\004true\022\035\n\tt" + + "imeRange\030\n \001(\0132\n.TimeRange\032\310\001\n\013ColumnVal" + + "ue\022\016\n\006family\030\001 \002(\014\022:\n\016qualifierValue\030\002 \003" + + "(\0132\".Mutate.ColumnValue.QualifierValue\032m" + + "\n\016QualifierValue\022\021\n\tqualifier\030\001 \001(\014\022\r\n\005v", + "alue\030\002 \001(\014\022\021\n\ttimestamp\030\003 \001(\004\022&\n\ndeleteT" + + "ype\030\004 \001(\0162\022.Mutate.DeleteType\"<\n\nMutateT" + + "ype\022\n\n\006APPEND\020\000\022\r\n\tINCREMENT\020\001\022\007\n\003PUT\020\002\022" + + "\n\n\006DELETE\020\003\"U\n\nDeleteType\022\026\n\022DELETE_ONE_" + + "VERSION\020\000\022\034\n\030DELETE_MULTIPLE_VERSIONS\020\001\022" + + "\021\n\rDELETE_FAMILY\020\002\"i\n\rMutateRequest\022 \n\006r" + + "egion\030\001 \002(\0132\020.RegionSpecifier\022\027\n\006mutate\030" + + "\002 \002(\0132\007.Mutate\022\035\n\tcondition\030\003 \001(\0132\n.Cond" + + "ition\"<\n\016MutateResponse\022\027\n\006result\030\001 \001(\0132" + + "\007.Result\022\021\n\tprocessed\030\002 \001(\010\"\307\002\n\004Scan\022\027\n\006", + "column\030\001 \003(\0132\007.Column\022!\n\tattribute\030\002 \003(\013" + + "2\016.NameBytesPair\022\020\n\010startRow\030\003 \001(\014\022\017\n\007st" + + "opRow\030\004 \001(\014\022\027\n\006filter\030\005 \001(\0132\007.Filter\022\035\n\t" + + "timeRange\030\006 \001(\0132\n.TimeRange\022\026\n\013maxVersio" + + "ns\030\007 
\001(\r:\0011\022\031\n\013cacheBlocks\030\010 \001(\010:\004true\022\021" + + "\n\tbatchSize\030\t \001(\r\022\025\n\rmaxResultSize\030\n \001(\004" + + "\022\022\n\nstoreLimit\030\013 \001(\r\022\023\n\013storeOffset\030\014 \001(" + + "\r\022\"\n\032loadColumnFamiliesOnDemand\030\r \001(\010\"\230\001" + + "\n\013ScanRequest\022 \n\006region\030\001 \001(\0132\020.RegionSp" + + "ecifier\022\023\n\004scan\030\002 \001(\0132\005.Scan\022\021\n\tscannerI", + "d\030\003 \001(\004\022\024\n\014numberOfRows\030\004 \001(\r\022\024\n\014closeSc" + + "anner\030\005 \001(\010\022\023\n\013nextCallSeq\030\006 \001(\004\"u\n\014Scan" + + "Response\022\027\n\006result\030\001 \003(\0132\007.Result\022\021\n\tsca" + + "nnerId\030\002 \001(\004\022\023\n\013moreResults\030\003 \001(\010\022\013\n\003ttl" + + "\030\004 \001(\r\022\027\n\017resultSizeBytes\030\005 \001(\004\"\260\001\n\024Bulk" + + "LoadHFileRequest\022 \n\006region\030\001 \002(\0132\020.Regio" + + "nSpecifier\0224\n\nfamilyPath\030\002 \003(\0132 .BulkLoa" + + "dHFileRequest.FamilyPath\022\024\n\014assignSeqNum" + + "\030\003 \001(\010\032*\n\nFamilyPath\022\016\n\006family\030\001 \002(\014\022\014\n\004" + + "path\030\002 \002(\t\"\'\n\025BulkLoadHFileResponse\022\016\n\006l", + "oaded\030\001 \002(\010\"_\n\026CoprocessorServiceCall\022\013\n" + + "\003row\030\001 \002(\014\022\023\n\013serviceName\030\002 \002(\t\022\022\n\nmetho" + + "dName\030\003 \002(\t\022\017\n\007request\030\004 \002(\014\"d\n\031Coproces" + + "sorServiceRequest\022 \n\006region\030\001 \002(\0132\020.Regi" + + "onSpecifier\022%\n\004call\030\002 \002(\0132\027.CoprocessorS" + + "erviceCall\"]\n\032CoprocessorServiceResponse" + + "\022 \n\006region\030\001 \002(\0132\020.RegionSpecifier\022\035\n\005va" + + "lue\030\002 \002(\0132\016.NameBytesPair\"9\n\013MultiAction" + + "\022\027\n\006mutate\030\001 \001(\0132\007.Mutate\022\021\n\003get\030\002 \001(\0132\004" + + ".Get\"I\n\014ActionResult\022\026\n\005value\030\001 \001(\0132\007.Re", + "sult\022!\n\texception\030\002 \001(\0132\016.NameBytesPair\"" + + "^\n\014MultiRequest\022 \n\006region\030\001 \002(\0132\020.Region" + + "Specifier\022\034\n\006action\030\002 \003(\0132\014.MultiAction\022" + + "\016\n\006atomic\030\003 \001(\010\".\n\rMultiResponse\022\035\n\006resu" + + "lt\030\001 \003(\0132\r.ActionResult2\342\002\n\rClientServic" + + "e\022 \n\003get\022\013.GetRequest\032\014.GetResponse\022/\n\010m" + + "ultiGet\022\020.MultiGetRequest\032\021.MultiGetResp" + + "onse\022)\n\006mutate\022\016.MutateRequest\032\017.MutateR" + + "esponse\022#\n\004scan\022\014.ScanRequest\032\r.ScanResp" + + "onse\022>\n\rbulkLoadHFile\022\025.BulkLoadHFileReq", + "uest\032\026.BulkLoadHFileResponse\022F\n\013execServ" + + "ice\022\032.CoprocessorServiceRequest\032\033.Coproc" + + "essorServiceResponse\022&\n\005multi\022\r.MultiReq" + + "uest\032\016.MultiResponseBB\n*org.apache.hadoo" + + "p.hbase.protobuf.generatedB\014ClientProtos" + + "H\001\210\001\001\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -21365,6 +21294,7 @@ public final class ClientProtos { com.google.protobuf.Descriptors.FileDescriptor .internalBuildGeneratedFileFrom(descriptorData, new com.google.protobuf.Descriptors.FileDescriptor[] { + org.apache.hbase.protobuf.generated.CellProtos.getDescriptor(), org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.getDescriptor(), 
org.apache.hadoop.hbase.protobuf.generated.ComparatorProtos.getDescriptor(), }, assigner); diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java index a282e86..b1888f5 100644 --- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java +++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java @@ -92,87 +92,6 @@ public final class HBaseProtos { // @@protoc_insertion_point(enum_scope:CompareType) } - public enum KeyType - implements com.google.protobuf.ProtocolMessageEnum { - MINIMUM(0, 0), - PUT(1, 4), - DELETE(2, 8), - DELETE_COLUMN(3, 12), - DELETE_FAMILY(4, 14), - MAXIMUM(5, 255), - ; - - public static final int MINIMUM_VALUE = 0; - public static final int PUT_VALUE = 4; - public static final int DELETE_VALUE = 8; - public static final int DELETE_COLUMN_VALUE = 12; - public static final int DELETE_FAMILY_VALUE = 14; - public static final int MAXIMUM_VALUE = 255; - - - public final int getNumber() { return value; } - - public static KeyType valueOf(int value) { - switch (value) { - case 0: return MINIMUM; - case 4: return PUT; - case 8: return DELETE; - case 12: return DELETE_COLUMN; - case 14: return DELETE_FAMILY; - case 255: return MAXIMUM; - default: return null; - } - } - - public static com.google.protobuf.Internal.EnumLiteMap<KeyType> - internalGetValueMap() { - return internalValueMap; - } - private static com.google.protobuf.Internal.EnumLiteMap<KeyType> - internalValueMap = - new com.google.protobuf.Internal.EnumLiteMap<KeyType>() { - public KeyType findValueByNumber(int number) { - return KeyType.valueOf(number); - } - }; - - public final com.google.protobuf.Descriptors.EnumValueDescriptor - getValueDescriptor() { - return getDescriptor().getValues().get(index); - } - public final com.google.protobuf.Descriptors.EnumDescriptor - getDescriptorForType() { - return getDescriptor(); - } - public static final com.google.protobuf.Descriptors.EnumDescriptor - getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.getDescriptor().getEnumTypes().get(1); - } - - private static final KeyType[] VALUES = { - MINIMUM, PUT, DELETE, DELETE_COLUMN, DELETE_FAMILY, MAXIMUM, - }; - - public static KeyType valueOf( - com.google.protobuf.Descriptors.EnumValueDescriptor desc) { - if (desc.getType() != getDescriptor()) { - throw new java.lang.IllegalArgumentException( - "EnumValueDescriptor is not for this type."); - } - return VALUES[desc.getIndex()]; - } - - private final int index; - private final int value; - - private KeyType(int index, int value) { - this.index = index; - this.value = value; - } - - // @@protoc_insertion_point(enum_scope:KeyType) - } - public interface TableSchemaOrBuilder extends com.google.protobuf.MessageOrBuilder { @@ -7331,9 +7250,9 @@ boolean hasTimestamp(); long getTimestamp(); - // optional .KeyType keyType = 5; + // optional .CellType keyType = 5; boolean hasKeyType(); - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.KeyType getKeyType(); + org.apache.hbase.protobuf.generated.CellProtos.CellType getKeyType(); // optional bytes value = 6; boolean hasValue(); @@ -7408,13 +7327,13 @@ return timestamp_; } - // optional .KeyType keyType = 5; + // optional .CellType keyType = 5; public static final int KEYTYPE_FIELD_NUMBER = 5; - private
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.KeyType keyType_; + private org.apache.hbase.protobuf.generated.CellProtos.CellType keyType_; public boolean hasKeyType() { return ((bitField0_ & 0x00000010) == 0x00000010); } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.KeyType getKeyType() { + public org.apache.hbase.protobuf.generated.CellProtos.CellType getKeyType() { return keyType_; } @@ -7433,7 +7352,7 @@ public final class HBaseProtos { family_ = com.google.protobuf.ByteString.EMPTY; qualifier_ = com.google.protobuf.ByteString.EMPTY; timestamp_ = 0L; - keyType_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.KeyType.MINIMUM; + keyType_ = org.apache.hbase.protobuf.generated.CellProtos.CellType.MINIMUM; value_ = com.google.protobuf.ByteString.EMPTY; } private byte memoizedIsInitialized = -1; @@ -7721,7 +7640,7 @@ public final class HBaseProtos { bitField0_ = (bitField0_ & ~0x00000004); timestamp_ = 0L; bitField0_ = (bitField0_ & ~0x00000008); - keyType_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.KeyType.MINIMUM; + keyType_ = org.apache.hbase.protobuf.generated.CellProtos.CellType.MINIMUM; bitField0_ = (bitField0_ & ~0x00000010); value_ = com.google.protobuf.ByteString.EMPTY; bitField0_ = (bitField0_ & ~0x00000020); @@ -7886,7 +7805,7 @@ public final class HBaseProtos { } case 40: { int rawValue = input.readEnum(); - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.KeyType value = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.KeyType.valueOf(rawValue); + org.apache.hbase.protobuf.generated.CellProtos.CellType value = org.apache.hbase.protobuf.generated.CellProtos.CellType.valueOf(rawValue); if (value == null) { unknownFields.mergeVarintField(5, rawValue); } else { @@ -7999,15 +7918,15 @@ public final class HBaseProtos { return this; } - // optional .KeyType keyType = 5; - private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.KeyType keyType_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.KeyType.MINIMUM; + // optional .CellType keyType = 5; + private org.apache.hbase.protobuf.generated.CellProtos.CellType keyType_ = org.apache.hbase.protobuf.generated.CellProtos.CellType.MINIMUM; public boolean hasKeyType() { return ((bitField0_ & 0x00000010) == 0x00000010); } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.KeyType getKeyType() { + public org.apache.hbase.protobuf.generated.CellProtos.CellType getKeyType() { return keyType_; } - public Builder setKeyType(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.KeyType value) { + public Builder setKeyType(org.apache.hbase.protobuf.generated.CellProtos.CellType value) { if (value == null) { throw new NullPointerException(); } @@ -8018,7 +7937,7 @@ public final class HBaseProtos { } public Builder clearKeyType() { bitField0_ = (bitField0_ & ~0x00000010); - keyType_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.KeyType.MINIMUM; + keyType_ = org.apache.hbase.protobuf.generated.CellProtos.CellType.MINIMUM; onChanged(); return this; } @@ -11740,58 +11659,55 @@ public final class HBaseProtos { descriptor; static { java.lang.String[] descriptorData = { - "\n\013hbase.proto\"\225\001\n\013TableSchema\022\014\n\004name\030\001 " + - "\001(\014\022#\n\nattributes\030\002 \003(\0132\017.BytesBytesPair" + - "\022+\n\016columnFamilies\030\003 \003(\0132\023.ColumnFamilyS" + - "chema\022&\n\rconfiguration\030\004 \003(\0132\017.NameStrin" + - "gPair\"o\n\022ColumnFamilySchema\022\014\n\004name\030\001 \002(" + - "\014\022#\n\nattributes\030\002 
\003(\0132\017.BytesBytesPair\022&" + - "\n\rconfiguration\030\003 \003(\0132\017.NameStringPair\"s" + - "\n\nRegionInfo\022\020\n\010regionId\030\001 \002(\004\022\021\n\ttableN" + - "ame\030\002 \002(\014\022\020\n\010startKey\030\003 \001(\014\022\016\n\006endKey\030\004 " + - "\001(\014\022\017\n\007offline\030\005 \001(\010\022\r\n\005split\030\006 \001(\010\"\225\001\n\017", - "RegionSpecifier\0222\n\004type\030\001 \002(\0162$.RegionSp" + - "ecifier.RegionSpecifierType\022\r\n\005value\030\002 \002" + - "(\014\"?\n\023RegionSpecifierType\022\017\n\013REGION_NAME" + - "\020\001\022\027\n\023ENCODED_REGION_NAME\020\002\"\260\003\n\nRegionLo" + - "ad\022)\n\017regionSpecifier\030\001 \002(\0132\020.RegionSpec" + - "ifier\022\016\n\006stores\030\002 \001(\r\022\022\n\nstorefiles\030\003 \001(" + - "\r\022\037\n\027storeUncompressedSizeMB\030\004 \001(\r\022\027\n\017st" + - "orefileSizeMB\030\005 \001(\r\022\026\n\016memstoreSizeMB\030\006 " + - "\001(\r\022\034\n\024storefileIndexSizeMB\030\007 \001(\r\022\031\n\021rea" + - "dRequestsCount\030\010 \001(\004\022\032\n\022writeRequestsCou", - "nt\030\t \001(\004\022\032\n\022totalCompactingKVs\030\n \001(\004\022\033\n\023" + - "currentCompactedKVs\030\013 \001(\004\022\027\n\017rootIndexSi" + - "zeKB\030\014 \001(\r\022\036\n\026totalStaticIndexSizeKB\030\r \001" + - "(\r\022\036\n\026totalStaticBloomSizeKB\030\016 \001(\r\022\032\n\022co" + - "mpleteSequenceId\030\017 \001(\004\"\372\001\n\nServerLoad\022\030\n" + - "\020numberOfRequests\030\001 \001(\r\022\035\n\025totalNumberOf" + - "Requests\030\002 \001(\r\022\022\n\nusedHeapMB\030\003 \001(\r\022\021\n\tma" + - "xHeapMB\030\004 \001(\r\022 \n\013regionLoads\030\005 \003(\0132\013.Reg" + - "ionLoad\022\"\n\014coprocessors\030\006 \003(\0132\014.Coproces" + - "sor\022\027\n\017reportStartTime\030\007 \001(\004\022\025\n\rreportEn", - "dTime\030\010 \001(\004\022\026\n\016infoServerPort\030\t \001(\r\"%\n\tT" + - "imeRange\022\014\n\004from\030\001 \001(\004\022\n\n\002to\030\002 \001(\004\"0\n\006Fi" + - "lter\022\014\n\004name\030\001 \002(\t\022\030\n\020serializedFilter\030\002" + - " \001(\014\"w\n\010KeyValue\022\013\n\003row\030\001 \002(\014\022\016\n\006family\030" + - "\002 \002(\014\022\021\n\tqualifier\030\003 \002(\014\022\021\n\ttimestamp\030\004 " + - "\001(\004\022\031\n\007keyType\030\005 \001(\0162\010.KeyType\022\r\n\005value\030" + - "\006 \001(\014\"?\n\nServerName\022\020\n\010hostName\030\001 \002(\t\022\014\n" + - "\004port\030\002 \001(\r\022\021\n\tstartCode\030\003 \001(\004\"\033\n\013Coproc" + - "essor\022\014\n\004name\030\001 \002(\t\"-\n\016NameStringPair\022\014\n" + - "\004name\030\001 \002(\t\022\r\n\005value\030\002 \002(\t\",\n\rNameBytesP", - "air\022\014\n\004name\030\001 \002(\t\022\r\n\005value\030\002 \001(\014\"/\n\016Byte" + - "sBytesPair\022\r\n\005first\030\001 \002(\014\022\016\n\006second\030\002 \002(" + - "\014\",\n\rNameInt64Pair\022\014\n\004name\030\001 \001(\t\022\r\n\005valu" + - "e\030\002 \001(\003\"\n\n\010EmptyMsg\"\032\n\007LongMsg\022\017\n\007longMs" + - "g\030\001 \002(\003*r\n\013CompareType\022\010\n\004LESS\020\000\022\021\n\rLESS" + - "_OR_EQUAL\020\001\022\t\n\005EQUAL\020\002\022\r\n\tNOT_EQUAL\020\003\022\024\n" + - "\020GREATER_OR_EQUAL\020\004\022\013\n\007GREATER\020\005\022\t\n\005NO_O" + - "P\020\006*_\n\007KeyType\022\013\n\007MINIMUM\020\000\022\007\n\003PUT\020\004\022\n\n\006" + - "DELETE\020\010\022\021\n\rDELETE_COLUMN\020\014\022\021\n\rDELETE_FA" + - 
"MILY\020\016\022\014\n\007MAXIMUM\020\377\001B>\n*org.apache.hadoo", - "p.hbase.protobuf.generatedB\013HBaseProtosH" + - "\001\240\001\001" + "\n\013hbase.proto\032\nCell.proto\"\225\001\n\013TableSchem" + + "a\022\014\n\004name\030\001 \001(\014\022#\n\nattributes\030\002 \003(\0132\017.By" + + "tesBytesPair\022+\n\016columnFamilies\030\003 \003(\0132\023.C" + + "olumnFamilySchema\022&\n\rconfiguration\030\004 \003(\013" + + "2\017.NameStringPair\"o\n\022ColumnFamilySchema\022" + + "\014\n\004name\030\001 \002(\014\022#\n\nattributes\030\002 \003(\0132\017.Byte" + + "sBytesPair\022&\n\rconfiguration\030\003 \003(\0132\017.Name" + + "StringPair\"s\n\nRegionInfo\022\020\n\010regionId\030\001 \002" + + "(\004\022\021\n\ttableName\030\002 \002(\014\022\020\n\010startKey\030\003 \001(\014\022" + + "\016\n\006endKey\030\004 \001(\014\022\017\n\007offline\030\005 \001(\010\022\r\n\005spli", + "t\030\006 \001(\010\"\225\001\n\017RegionSpecifier\0222\n\004type\030\001 \002(" + + "\0162$.RegionSpecifier.RegionSpecifierType\022" + + "\r\n\005value\030\002 \002(\014\"?\n\023RegionSpecifierType\022\017\n" + + "\013REGION_NAME\020\001\022\027\n\023ENCODED_REGION_NAME\020\002\"" + + "\260\003\n\nRegionLoad\022)\n\017regionSpecifier\030\001 \002(\0132" + + "\020.RegionSpecifier\022\016\n\006stores\030\002 \001(\r\022\022\n\nsto" + + "refiles\030\003 \001(\r\022\037\n\027storeUncompressedSizeMB" + + "\030\004 \001(\r\022\027\n\017storefileSizeMB\030\005 \001(\r\022\026\n\016memst" + + "oreSizeMB\030\006 \001(\r\022\034\n\024storefileIndexSizeMB\030" + + "\007 \001(\r\022\031\n\021readRequestsCount\030\010 \001(\004\022\032\n\022writ", + "eRequestsCount\030\t \001(\004\022\032\n\022totalCompactingK" + + "Vs\030\n \001(\004\022\033\n\023currentCompactedKVs\030\013 \001(\004\022\027\n" + + "\017rootIndexSizeKB\030\014 \001(\r\022\036\n\026totalStaticInd" + + "exSizeKB\030\r \001(\r\022\036\n\026totalStaticBloomSizeKB" + + "\030\016 \001(\r\022\032\n\022completeSequenceId\030\017 \001(\004\"\372\001\n\nS" + + "erverLoad\022\030\n\020numberOfRequests\030\001 \001(\r\022\035\n\025t" + + "otalNumberOfRequests\030\002 \001(\r\022\022\n\nusedHeapMB" + + "\030\003 \001(\r\022\021\n\tmaxHeapMB\030\004 \001(\r\022 \n\013regionLoads" + + "\030\005 \003(\0132\013.RegionLoad\022\"\n\014coprocessors\030\006 \003(" + + "\0132\014.Coprocessor\022\027\n\017reportStartTime\030\007 \001(\004", + "\022\025\n\rreportEndTime\030\010 \001(\004\022\026\n\016infoServerPor" + + "t\030\t \001(\r\"%\n\tTimeRange\022\014\n\004from\030\001 \001(\004\022\n\n\002to" + + "\030\002 \001(\004\"0\n\006Filter\022\014\n\004name\030\001 \002(\t\022\030\n\020serial" + + "izedFilter\030\002 \001(\014\"x\n\010KeyValue\022\013\n\003row\030\001 \002(" + + "\014\022\016\n\006family\030\002 \002(\014\022\021\n\tqualifier\030\003 \002(\014\022\021\n\t" + + "timestamp\030\004 \001(\004\022\032\n\007keyType\030\005 \001(\0162\t.CellT" + + "ype\022\r\n\005value\030\006 \001(\014\"?\n\nServerName\022\020\n\010host" + + "Name\030\001 \002(\t\022\014\n\004port\030\002 \001(\r\022\021\n\tstartCode\030\003 " + + "\001(\004\"\033\n\013Coprocessor\022\014\n\004name\030\001 \002(\t\"-\n\016Name" + + "StringPair\022\014\n\004name\030\001 \002(\t\022\r\n\005value\030\002 \002(\t\"", + ",\n\rNameBytesPair\022\014\n\004name\030\001 \002(\t\022\r\n\005value\030" + + "\002 \001(\014\"/\n\016BytesBytesPair\022\r\n\005first\030\001 \002(\014\022\016" + + "\n\006second\030\002 \002(\014\",\n\rNameInt64Pair\022\014\n\004name\030" + + "\001 \001(\t\022\r\n\005value\030\002 
\001(\003\"\n\n\010EmptyMsg\"\032\n\007Long" + + "Msg\022\017\n\007longMsg\030\001 \002(\003*r\n\013CompareType\022\010\n\004L" + + "ESS\020\000\022\021\n\rLESS_OR_EQUAL\020\001\022\t\n\005EQUAL\020\002\022\r\n\tN" + + "OT_EQUAL\020\003\022\024\n\020GREATER_OR_EQUAL\020\004\022\013\n\007GREA" + + "TER\020\005\022\t\n\005NO_OP\020\006B>\n*org.apache.hadoop.hb" + + "ase.protobuf.generatedB\013HBaseProtosH\001\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -11940,6 +11856,7 @@ public final class HBaseProtos { com.google.protobuf.Descriptors.FileDescriptor .internalBuildGeneratedFileFrom(descriptorData, new com.google.protobuf.Descriptors.FileDescriptor[] { + org.apache.hbase.protobuf.generated.CellProtos.getDescriptor(), }, assigner); } diff --git a/hbase-protocol/src/main/protobuf/Aggregate.proto b/hbase-protocol/src/main/protobuf/Aggregate.proto index 5707e14..ca59f1f 100644 --- a/hbase-protocol/src/main/protobuf/Aggregate.proto +++ b/hbase-protocol/src/main/protobuf/Aggregate.proto @@ -59,4 +59,4 @@ service AggregateService { rpc getAvg (AggregateArgument) returns (AggregateResponse); rpc getStd (AggregateArgument) returns (AggregateResponse); rpc getMedian (AggregateArgument) returns (AggregateResponse); -} \ No newline at end of file +} diff --git a/hbase-protocol/src/main/protobuf/Client.proto b/hbase-protocol/src/main/protobuf/Client.proto index d725b38..f52b516 100644 --- a/hbase-protocol/src/main/protobuf/Client.proto +++ b/hbase-protocol/src/main/protobuf/Client.proto @@ -18,6 +18,8 @@ // This file contains protocol buffers that are used for Client service. +import "Cell.proto"; + option java_package = "org.apache.hadoop.hbase.protobuf.generated"; option java_outer_classname = "ClientProtos"; option java_generic_services = true; @@ -51,7 +53,7 @@ message Get { } message Result { - repeated KeyValue keyValue = 1; + repeated Cell keyValue = 1; } /** diff --git a/hbase-protocol/src/main/protobuf/README.txt b/hbase-protocol/src/main/protobuf/README.txt index f979619..3575819 100644 --- a/hbase-protocol/src/main/protobuf/README.txt +++ b/hbase-protocol/src/main/protobuf/README.txt @@ -20,7 +20,7 @@ should be able to just copy and paste the below into a terminal and hit return fi for PROTO_FILE in $UNIX_PROTO_DIR/*.proto do - protoc -I$PROTO_DIR --java_out=$JAVA_DIR $PROTO_FILE + protoc -I$PROTO_DIR -I"../hbase-common/src/main/protobuf" --java_out=$JAVA_DIR $PROTO_FILE done After you've done the above, check it in and then check it in (or post a patch diff --git a/hbase-protocol/src/main/protobuf/hbase.proto b/hbase-protocol/src/main/protobuf/hbase.proto index aece7b0..2d4f410 100644 --- a/hbase-protocol/src/main/protobuf/hbase.proto +++ b/hbase-protocol/src/main/protobuf/hbase.proto @@ -17,6 +17,7 @@ */ // This file contains protocol buffers that are shared throughout HBase +import "Cell.proto"; option java_package = "org.apache.hadoop.hbase.protobuf.generated"; option java_outer_classname = "HBaseProtos"; @@ -201,21 +202,6 @@ enum CompareType { } /** - * The type of the key in a KeyValue. - */ -enum KeyType { - MINIMUM = 0; - PUT = 4; - - DELETE = 8; - DELETE_COLUMN = 12; - DELETE_FAMILY = 14; - - // MAXIMUM is used when searching; you look from maximum on down. - MAXIMUM = 255; -} - -/** * Protocol buffer version of KeyValue. 
* It doesn't have those transient parameters */ @@ -224,7 +210,7 @@ message KeyValue { required bytes family = 2; required bytes qualifier = 3; optional uint64 timestamp = 4; - optional KeyType keyType = 5; + optional CellType keyType = 5; optional bytes value = 6; } diff --git a/hbase-server/pom.xml b/hbase-server/pom.xml index 53c643b..90bf291 100644 --- a/hbase-server/pom.xml +++ b/hbase-server/pom.xml @@ -57,8 +57,8 @@ - org.apache.maven.plugins @@ -69,8 +69,8 @@ org/apache/hadoop/hbase/mapreduce/Driver - org/apache/jute/** @@ -207,7 +207,7 @@ - org.eclipse.m2e @@ -532,7 +532,7 @@ - @@ -550,8 +550,8 @@ - hadoop-1.0 diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/Result.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/Result.java index 9e15bbb..aab9074 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/Result.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/Result.java @@ -34,6 +34,7 @@ import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValue.SplitKeyValue; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hbase.Cell; /** * Single row result of a {@link Get} or {@link Scan} query.

@@ -105,7 +106,8 @@ public class Result { * are already sorted * @param kvs List of KeyValues */ - public Result(List<KeyValue> kvs) { + public Result(List<? extends Cell> kvs) { + // TODO: Here we presume the passed in Cells are KVs. One day this won't always be so. this(kvs.toArray(new KeyValue[kvs.size()])); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/encoding/PrefixKeyDeltaEncoder.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/encoding/PrefixKeyDeltaEncoder.java index 925801a..590baf5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/encoding/PrefixKeyDeltaEncoder.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/encoding/PrefixKeyDeltaEncoder.java @@ -16,14 +16,10 @@ */ package org.apache.hadoop.hbase.io.encoding; -import java.io.ByteArrayOutputStream; import java.io.DataInputStream; import java.io.DataOutputStream; -import java.io.FilterOutputStream; import java.io.IOException; -import java.lang.reflect.Field; import java.nio.ByteBuffer; -import java.util.Arrays; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.hbase.KeyValue; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/KeyValueSerialization.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/KeyValueSerialization.java index 4223dbe..c6e5adc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/KeyValueSerialization.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/KeyValueSerialization.java @@ -17,13 +17,13 @@ */ package org.apache.hadoop.hbase.mapreduce; +import java.io.DataInputStream; +import java.io.DataOutputStream; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; import org.apache.hadoop.io.serializer.Deserializer; import org.apache.hadoop.io.serializer.Serialization; import org.apache.hadoop.io.serializer.Serializer; @@ -45,43 +45,41 @@ public class KeyValueSerialization implements Serialization<KeyValue> { } public static class KeyValueDeserializer implements Deserializer<KeyValue> { - private InputStream is; + private DataInputStream dis; @Override public void close() throws IOException { - this.is.close(); + this.dis.close(); } @Override public KeyValue deserialize(KeyValue ignore) throws IOException { // I can't overwrite the passed in KV, not from a proto kv, not just yet.
TODO - HBaseProtos.KeyValue proto = - HBaseProtos.KeyValue.parseDelimitedFrom(this.is); - return ProtobufUtil.toKeyValue(proto); + return KeyValue.create(this.dis); } @Override public void open(InputStream is) throws IOException { - this.is = is; + this.dis = new DataInputStream(is); } } public static class KeyValueSerializer implements Serializer<KeyValue> { - private OutputStream os; + private DataOutputStream dos; @Override public void close() throws IOException { - this.os.close(); + this.dos.close(); } @Override public void open(OutputStream os) throws IOException { - this.os = os; + this.dos = new DataOutputStream(os); } @Override public void serialize(KeyValue kv) throws IOException { - ProtobufUtil.toKeyValue(kv).writeDelimitedTo(this.os); + KeyValue.write(kv, this.dos); } } } \ No newline at end of file diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java index 08f8ba6..0c77ca4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java @@ -122,6 +122,8 @@ import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.io.Text; import org.apache.hadoop.security.token.Token; import org.apache.hbase.Cell; +import org.apache.hbase.CellTool; +import org.apache.hbase.protobuf.generated.CellProtos; import com.google.common.collect.ArrayListMultimap; import com.google.common.collect.ListMultimap; @@ -831,7 +833,7 @@ public final class ProtobufUtil { Cell [] cells = result.raw(); if (cells != null) { for (Cell c : cells) { - builder.addKeyValue(toKeyValue(c)); + builder.addKeyValue(toCell(c)); } } return builder.build(); @@ -844,12 +846,12 @@ * @return the converted client Result */ public static Result toResult(final ClientProtos.Result proto) { - List<HBaseProtos.KeyValue> values = proto.getKeyValueList(); - List<KeyValue> keyValues = new ArrayList<KeyValue>(values.size()); - for (HBaseProtos.KeyValue kv: values) { - keyValues.add(toKeyValue(kv)); + List<CellProtos.Cell> values = proto.getKeyValueList(); + List<Cell> cells = new ArrayList<Cell>(values.size()); + for (CellProtos.Cell c: values) { + cells.add(toCell(c)); } - return new Result(keyValues); + return new Result(cells); } /** @@ -1792,33 +1794,31 @@ throw new IOException(se); } - public static HBaseProtos.KeyValue toKeyValue(final Cell kv) { + public static CellProtos.Cell toCell(final Cell kv) { // Doing this is going to kill us if we do it for all data passed.
// St.Ack 20121205 - // TODO: Do a Cell version - HBaseProtos.KeyValue.Builder kvbuilder = HBaseProtos.KeyValue.newBuilder(); + CellProtos.Cell.Builder kvbuilder = CellProtos.Cell.newBuilder(); kvbuilder.setRow(ByteString.copyFrom(kv.getRowArray(), kv.getRowOffset(), kv.getRowLength())); kvbuilder.setFamily(ByteString.copyFrom(kv.getFamilyArray(), kv.getFamilyOffset(), kv.getFamilyLength())); kvbuilder.setQualifier(ByteString.copyFrom(kv.getQualifierArray(), kv.getQualifierOffset(), kv.getQualifierLength())); - kvbuilder.setKeyType(HBaseProtos.KeyType.valueOf(kv.getTypeByte())); + kvbuilder.setCellType(CellProtos.CellType.valueOf(kv.getTypeByte())); kvbuilder.setTimestamp(kv.getTimestamp()); kvbuilder.setValue(ByteString.copyFrom(kv.getValueArray(), kv.getValueOffset(), kv.getValueLength())); return kvbuilder.build(); } - public static KeyValue toKeyValue(final HBaseProtos.KeyValue kv) { + public static Cell toCell(final CellProtos.Cell cell) { // Doing this is going to kill us if we do it for all data passed. // St.Ack 20121205 - // TODO: Do a Cell version - return new KeyValue(kv.getRow().toByteArray(), - kv.getFamily().toByteArray(), - kv.getQualifier().toByteArray(), - kv.getTimestamp(), - KeyValue.Type.codeToType((byte)kv.getKeyType().getNumber()), - kv.getValue().toByteArray()); + return CellTool.createCell(cell.getRow().toByteArray(), + cell.getFamily().toByteArray(), + cell.getQualifier().toByteArray(), + cell.getTimestamp(), + (byte)cell.getCellType().getNumber(), + cell.getValue().toByteArray()); } /** diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestColumnSeeking.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestColumnSeeking.java index db1f7aa..515516e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestColumnSeeking.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestColumnSeeking.java @@ -35,7 +35,7 @@ import org.apache.hadoop.hbase.*; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hbase.cell.CellComparator; +import org.apache.hbase.CellComparator; import org.junit.Test; import org.junit.experimental.categories.Category; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java index 49a72c4..f28c24f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java @@ -90,7 +90,7 @@ import org.apache.hadoop.hbase.util.IncrementingEnvironmentEdge; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.util.PairOfSameType; import org.apache.hadoop.hbase.util.Threads; -import org.apache.hbase.cell.CellComparator; +import org.apache.hbase.CellComparator; import org.junit.Assert; import org.junit.Test; import org.junit.experimental.categories.Category; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMultiColumnScanner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMultiColumnScanner.java index 23d71d3..9eb531f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMultiColumnScanner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMultiColumnScanner.java @@ -48,7 +48,7 @@ import 
org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.io.compress.Compression; import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hbase.cell.CellComparator; +import org.apache.hbase.CellComparator; import org.junit.Test; import org.junit.experimental.categories.Category; import org.junit.runner.RunWith;
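
Reviewer note on the ProtobufUtil change above: with this patch a client Result's cells travel as CellProtos.Cell rather than HBaseProtos.KeyValue, via the two new toCell() overloads. A minimal sketch of that round trip; the class wrapper and the sample row/family/qualifier/value bytes are made up for illustration:

import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hbase.Cell;
import org.apache.hbase.protobuf.generated.CellProtos;

public class CellConversionSketch {
  public static void main(String[] args) {
    // KeyValue implements the Cell interface, so it can be handed straight to toCell().
    Cell kv = new KeyValue(Bytes.toBytes("row"), Bytes.toBytes("f"), Bytes.toBytes("q"),
        1L, KeyValue.Type.Put, Bytes.toBytes("value"));
    CellProtos.Cell pb = ProtobufUtil.toCell(kv);  // outbound: Cell -> pb Cell
    Cell back = ProtobufUtil.toCell(pb);           // inbound: pb Cell -> Cell, via CellTool.createCell
  }
}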
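
That conversion leans on the KeyValue type codes lining up with the CellType enum numbers (MINIMUM=0, PUT=4, DELETE=8, DELETE_COLUMN=12, DELETE_FAMILY=14, MAXIMUM=255, mirroring the KeyType enum this patch removes from hbase.proto); that is why toCell() can call CellType.valueOf(kv.getTypeByte()) directly. A small sketch of the mapping, assuming Cell.proto declares CellType with exactly those numbers:

import org.apache.hadoop.hbase.KeyValue;
import org.apache.hbase.protobuf.generated.CellProtos;

public class CellTypeMappingSketch {
  public static void main(String[] args) {
    byte code = KeyValue.Type.Put.getCode();                      // 4
    CellProtos.CellType type = CellProtos.CellType.valueOf(code); // CellType.PUT
    byte back = (byte) type.getNumber();                          // 4 again
    assert KeyValue.Type.codeToType(back) == KeyValue.Type.Put;
  }
}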
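
And a round trip through the rewired MapReduce KeyValueSerialization: serialize() now writes the KeyValue's backing bytes with KeyValue.write() on a DataOutputStream, and deserialize() reads them back with KeyValue.create(), with no protobuf in the path. The stream setup below is illustrative only:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.mapreduce.KeyValueSerialization.KeyValueDeserializer;
import org.apache.hadoop.hbase.mapreduce.KeyValueSerialization.KeyValueSerializer;
import org.apache.hadoop.hbase.util.Bytes;

public class KeyValueSerializationSketch {
  public static void main(String[] args) throws Exception {
    KeyValue kv = new KeyValue(Bytes.toBytes("row"), Bytes.toBytes("f"),
        Bytes.toBytes("q"), Bytes.toBytes("value"));

    ByteArrayOutputStream out = new ByteArrayOutputStream();
    KeyValueSerializer serializer = new KeyValueSerializer();
    serializer.open(out);      // wraps the raw stream in a DataOutputStream
    serializer.serialize(kv);  // KeyValue.write(kv, dos)
    serializer.close();

    KeyValueDeserializer deserializer = new KeyValueDeserializer();
    deserializer.open(new ByteArrayInputStream(out.toByteArray()));
    KeyValue copy = deserializer.deserialize(null);  // passed-in KV is ignored; a new one comes back
    deserializer.close();
  }
}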