.../java/org/apache/hadoop/hbase/HRegionInfo.java | 3 +- .../org/apache/hadoop/hbase/filter/FilterList.java | 4 +- .../apache/hadoop/hbase/filter/ParseFilter.java | 5 +- .../hadoop/hbase/client/TestClientNoCluster.java | 4 +- .../org/apache/hadoop/hbase/CellComparator.java | 667 +++++++++++---- .../java/org/apache/hadoop/hbase/CellUtil.java | 239 ++++++ .../java/org/apache/hadoop/hbase/KeyValue.java | 926 +-------------------- .../org/apache/hadoop/hbase/KeyValueTestUtil.java | 2 +- .../java/org/apache/hadoop/hbase/KeyValueUtil.java | 170 +++- .../java/org/apache/hadoop/hbase/TableName.java | 3 +- .../apache/hadoop/hbase/codec/KeyValueCodec.java | 2 +- .../hadoop/hbase/codec/KeyValueCodecWithTags.java | 2 +- .../io/encoding/BufferedDataBlockEncoder.java | 31 +- .../hbase/io/encoding/CopyKeyDataBlockEncoder.java | 4 +- .../hadoop/hbase/io/encoding/DataBlockEncoder.java | 8 +- .../hbase/io/encoding/DiffKeyDeltaEncoder.java | 4 +- .../hbase/io/encoding/FastDiffDeltaEncoder.java | 4 +- .../hbase/io/encoding/PrefixKeyDeltaEncoder.java | 4 +- .../apache/hadoop/hbase/TestCellComparator.java | 58 +- .../java/org/apache/hadoop/hbase/TestKeyValue.java | 47 +- .../apache/hadoop/hbase/codec/TestCellCodec.java | 7 +- .../hadoop/hbase/codec/TestCellCodecWithTags.java | 8 +- .../hbase/codec/TestKeyValueCodecWithTags.java | 8 +- .../org/apache/hadoop/hbase/types/TestPBCell.java | 3 +- .../hbase/codec/prefixtree/PrefixTreeCodec.java | 13 +- .../hbase/codec/prefixtree/PrefixTreeSeeker.java | 11 +- .../prefixtree/decode/PrefixTreeArrayScanner.java | 3 +- .../codec/prefixtree/decode/PrefixTreeCell.java | 6 +- .../codec/prefixtree/row/BaseTestRowData.java | 4 +- .../prefixtree/row/TestPrefixTreeSearcher.java | 6 +- .../row/data/TestRowDataSearcherRowMiss.java | 14 +- .../prefixtree/row/data/TestRowDataSimple.java | 14 +- .../hadoop/hbase/io/hfile/AbstractHFileReader.java | 11 +- .../hadoop/hbase/io/hfile/AbstractHFileWriter.java | 6 +- .../hadoop/hbase/io/hfile/FixedFileTrailer.java | 
29 +- .../org/apache/hadoop/hbase/io/hfile/HFile.java | 17 +- .../hadoop/hbase/io/hfile/HFileBlockIndex.java | 12 +- .../hadoop/hbase/io/hfile/HFilePrettyPrinter.java | 6 +- .../hadoop/hbase/io/hfile/HFileReaderV2.java | 14 +- .../hadoop/hbase/io/hfile/HFileWriterV2.java | 5 +- .../hadoop/hbase/io/hfile/HFileWriterV3.java | 6 +- .../hbase/mapreduce/KeyValueSerialization.java | 5 +- .../hadoop/hbase/regionserver/CellSkipListSet.java | 3 +- .../hadoop/hbase/regionserver/DefaultMemStore.java | 5 +- .../hbase/regionserver/DefaultStoreEngine.java | 4 +- .../regionserver/DefaultStoreFileManager.java | 6 +- .../regionserver/GetClosestRowBeforeTracker.java | 18 +- .../apache/hadoop/hbase/regionserver/HRegion.java | 6 +- .../apache/hadoop/hbase/regionserver/HStore.java | 17 +- .../hadoop/hbase/regionserver/KeyValueHeap.java | 10 +- .../hbase/regionserver/ReversedKeyValueHeap.java | 10 +- .../regionserver/ReversedRegionScannerImpl.java | 2 + .../hbase/regionserver/ReversedStoreScanner.java | 7 +- .../apache/hadoop/hbase/regionserver/ScanInfo.java | 10 +- .../hbase/regionserver/ScanQueryMatcher.java | 11 +- .../apache/hadoop/hbase/regionserver/Store.java | 3 +- .../hadoop/hbase/regionserver/StoreEngine.java | 8 +- .../hadoop/hbase/regionserver/StoreFile.java | 20 +- .../hbase/regionserver/StoreFileScanner.java | 7 +- .../hadoop/hbase/regionserver/StoreScanner.java | 8 +- .../hbase/regionserver/StripeMultiFileWriter.java | 6 +- .../hbase/regionserver/StripeStoreEngine.java | 4 +- .../hbase/regionserver/StripeStoreFileManager.java | 16 +- .../hadoop/hbase/regionserver/wal/WALEdit.java | 6 +- .../apache/hadoop/hbase/util/BloomFilterBase.java | 4 +- .../apache/hadoop/hbase/util/ByteBloomFilter.java | 4 +- .../hadoop/hbase/util/CollectionBackedScanner.java | 9 +- .../hadoop/hbase/util/CompoundBloomFilter.java | 4 +- .../hadoop/hbase/util/CompoundBloomFilterBase.java | 6 +- .../hbase/util/CompoundBloomFilterWriter.java | 4 +- .../apache/hadoop/hbase/util/CompressionTest.java | 3 +- 
.../org/apache/hadoop/hbase/util/HBaseFsck.java | 5 +- .../org/apache/hadoop/hbase/TestSerialization.java | 8 +- .../hadoop/hbase/codec/TestCellMessageCodec.java | 7 +- .../hadoop/hbase/io/TestHalfStoreFileReader.java | 4 +- .../hbase/io/hfile/TestFixedFileTrailer.java | 21 +- .../apache/hadoop/hbase/io/hfile/TestHFile.java | 11 +- .../hadoop/hbase/io/hfile/TestHFileBlockIndex.java | 3 +- .../hadoop/hbase/io/hfile/TestHFileSeek.java | 3 +- .../hadoop/hbase/io/hfile/TestHFileWriterV2.java | 4 +- .../hadoop/hbase/io/hfile/TestHFileWriterV3.java | 4 +- .../hbase/regionserver/KeyValueScanFixture.java | 10 +- .../hadoop/hbase/regionserver/MockStoreFile.java | 4 +- .../regionserver/TestCompoundBloomFilter.java | 5 +- .../hbase/regionserver/TestDefaultMemStore.java | 2 +- .../hbase/regionserver/TestDefaultStoreEngine.java | 4 +- .../regionserver/TestGetClosestAtOrBefore.java | 4 +- .../hadoop/hbase/regionserver/TestHRegion.java | 12 +- .../hbase/regionserver/TestHRegionOnCluster.java | 2 +- .../hbase/regionserver/TestMultiColumnScanner.java | 3 +- .../hbase/regionserver/TestQueryMatcher.java | 4 +- .../hbase/regionserver/TestRecoveredEdits.java | 6 +- .../hadoop/hbase/regionserver/TestStore.java | 8 +- .../hadoop/hbase/regionserver/TestStoreFile.java | 16 +- .../hbase/regionserver/TestStripeCompactor.java | 6 +- .../hbase/regionserver/TestStripeStoreEngine.java | 4 +- .../regionserver/TestStripeStoreFileManager.java | 4 +- .../compactions/TestStripeCompactionPolicy.java | 2 +- 98 files changed, 1384 insertions(+), 1398 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java index 7a43a50..16f57ef 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java @@ -27,7 +27,6 @@ import java.util.List; import org.apache.commons.logging.Log; import 
org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.KeyValue.KVComparator; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.hbase.client.RegionReplicaUtil; @@ -871,7 +870,7 @@ public class HRegionInfo implements Comparable { /** * @return Comparator to use comparing {@link KeyValue}s. */ - public KVComparator getComparator() { + public CellComparator getComparator() { return isMetaRegion()? KeyValue.META_COMPARATOR: KeyValue.COMPARATOR; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterList.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterList.java index ba1a818..138d232 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterList.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterList.java @@ -24,7 +24,7 @@ import java.util.Arrays; import java.util.List; import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.CellComparator; +import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; @@ -216,7 +216,7 @@ final public class FilterList extends Filter { @Override public Cell transformCell(Cell c) throws IOException { - if (!CellComparator.equals(c, referenceCell)) { + if (!CellUtil.equals(c, referenceCell)) { throw new IllegalStateException("Reference Cell: " + this.referenceCell + " does not match: " + c); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ParseFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ParseFilter.java index 8101f4a..e2975c0 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ParseFilter.java +++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ParseFilter.java @@ -32,6 +32,7 @@ import java.util.Stack; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; @@ -294,7 +295,7 @@ public class ParseFilter { * @return an ArrayList containing the arguments of the filter in the filter string */ public static ArrayList getFilterArguments (byte [] filterStringAsByteArray) { - int argumentListStartIndex = KeyValue.getDelimiter(filterStringAsByteArray, 0, + int argumentListStartIndex = CellUtil.getDelimiter(filterStringAsByteArray, 0, filterStringAsByteArray.length, ParseConstants.LPAREN); if (argumentListStartIndex == -1) { @@ -818,7 +819,7 @@ public class ParseFilter { * @return the parsed arguments of the comparator as a 2D byte array */ public static byte [][] parseComparator (byte [] comparator) { - final int index = KeyValue.getDelimiter(comparator, 0, comparator.length, ParseConstants.COLON); + final int index = CellUtil.getDelimiter(comparator, 0, comparator.length, ParseConstants.COLON); if (index == -1) { throw new IllegalArgumentException("Incorrect comparator"); } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java index 9671ea6..b969333 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java @@ -32,12 +32,14 @@ import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; +import org.apache.hadoop.hbase.CellComparator; import 
org.apache.commons.lang.NotImplementedException; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; +import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; @@ -674,7 +676,7 @@ public class TestClientNoCluster extends Configured implements Tool { * Comparator for meta row keys. */ private static class MetaRowsComparator implements Comparator { - private final KeyValue.KVComparator delegate = new KeyValue.MetaComparator(); + private final CellComparator delegate = KeyValue.META_COMPARATOR; @Override public int compare(byte[] left, byte[] right) { return delegate.compareRows(left, 0, left.length, right, 0, right.length); diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparator.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparator.java index 540c967..16afa2a 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparator.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparator.java @@ -19,12 +19,14 @@ package org.apache.hadoop.hbase; import java.io.Serializable; -import java.util.Comparator; - +import java.util.Arrays; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.KeyValue.Type; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.io.RawComparator; import com.google.common.primitives.Longs; @@ -40,14 +42,21 @@ import com.google.common.primitives.Longs; justification="Findbugs doesn't like the way we are negating the result of a compare in below") @InterfaceAudience.Private @InterfaceStability.Evolving -public class CellComparator 
implements Comparator, Serializable { +public class CellComparator implements RawComparator, Serializable { + public CellComparator() { + } private static final long serialVersionUID = -8760041766259623329L; + static final Log LOG = LogFactory.getLog(CellComparator.class); @Override public int compare(Cell a, Cell b) { return compare(a, b, false); } + public int compareOnlyKeyPortion(Cell left, Cell right) { + return compare(left, right, true); + } + /** * Compare cells. * TODO: Replace with dynamic rather than static comparator so can change comparator @@ -58,7 +67,7 @@ public class CellComparator implements Comparator, Serializable { * the sequenceid. Set to false to compare key and consider sequenceid. * @return 0 if equal, -1 if a < b, and +1 if a > b. */ - public static int compare(final Cell a, final Cell b, boolean ignoreSequenceid) { + public int compare(final Cell a, final Cell b, boolean ignoreSequenceid) { // row int c = compareRows(a, b); if (c != 0) return c; @@ -75,74 +84,7 @@ public class CellComparator implements Comparator, Serializable { } } - public static int findCommonPrefixInRowPart(Cell left, Cell right, int rowCommonPrefix) { - return findCommonPrefix(left.getRowArray(), right.getRowArray(), left.getRowLength() - - rowCommonPrefix, right.getRowLength() - rowCommonPrefix, left.getRowOffset() - + rowCommonPrefix, right.getRowOffset() + rowCommonPrefix); - } - - private static int findCommonPrefix(byte[] left, byte[] right, int leftLength, int rightLength, - int leftOffset, int rightOffset) { - int length = Math.min(leftLength, rightLength); - int result = 0; - - while (result < length && left[leftOffset + result] == right[rightOffset + result]) { - result++; - } - return result; - } - - public static int findCommonPrefixInFamilyPart(Cell left, Cell right, int familyCommonPrefix) { - return findCommonPrefix(left.getFamilyArray(), right.getFamilyArray(), left.getFamilyLength() - - familyCommonPrefix, right.getFamilyLength() - familyCommonPrefix, 
left.getFamilyOffset() - + familyCommonPrefix, right.getFamilyOffset() + familyCommonPrefix); - } - - public static int findCommonPrefixInQualifierPart(Cell left, Cell right, - int qualifierCommonPrefix) { - return findCommonPrefix(left.getQualifierArray(), right.getQualifierArray(), - left.getQualifierLength() - qualifierCommonPrefix, right.getQualifierLength() - - qualifierCommonPrefix, left.getQualifierOffset() + qualifierCommonPrefix, - right.getQualifierOffset() + qualifierCommonPrefix); - } - - /**************** equals ****************************/ - - public static boolean equals(Cell a, Cell b){ - return equalsRow(a, b) - && equalsFamily(a, b) - && equalsQualifier(a, b) - && equalsTimestamp(a, b) - && equalsType(a, b); - } - - public static boolean equalsRow(Cell a, Cell b){ - return Bytes.equals( - a.getRowArray(), a.getRowOffset(), a.getRowLength(), - b.getRowArray(), b.getRowOffset(), b.getRowLength()); - } - - public static boolean equalsFamily(Cell a, Cell b){ - return Bytes.equals( - a.getFamilyArray(), a.getFamilyOffset(), a.getFamilyLength(), - b.getFamilyArray(), b.getFamilyOffset(), b.getFamilyLength()); - } - - public static boolean equalsQualifier(Cell a, Cell b){ - return Bytes.equals( - a.getQualifierArray(), a.getQualifierOffset(), a.getQualifierLength(), - b.getQualifierArray(), b.getQualifierOffset(), b.getQualifierLength()); - } - - public static boolean equalsTimestamp(Cell a, Cell b){ - return a.getTimestamp() == b.getTimestamp(); - } - - public static boolean equalsType(Cell a, Cell b){ - return a.getTypeByte() == b.getTypeByte(); - } - - public static int compareColumns(final Cell left, final Cell right) { + public int compareColumns(final Cell left, final Cell right) { int lfoffset = left.getFamilyOffset(); int rfoffset = right.getFamilyOffset(); int lclength = left.getQualifierLength(); @@ -159,17 +101,27 @@ public class CellComparator implements Comparator, Serializable { } } - public static int compareFamilies(Cell left, Cell 
right) { + public int compareFamilies(Cell left, Cell right) { return Bytes.compareTo(left.getFamilyArray(), left.getFamilyOffset(), left.getFamilyLength(), right.getFamilyArray(), right.getFamilyOffset(), right.getFamilyLength()); } - public static int compareQualifiers(Cell left, Cell right) { + public int compareFamilies(byte[] leftFamily, int lFamOffset, int lFamLength, byte[] rightFamily, + int rFamOffset, int rFamLen) { + return Bytes.compareTo(leftFamily, lFamOffset, lFamLength, rightFamily, rFamOffset, rFamLen); + } + + public int compareQualifiers(Cell left, Cell right) { return Bytes.compareTo(left.getQualifierArray(), left.getQualifierOffset(), left.getQualifierLength(), right.getQualifierArray(), right.getQualifierOffset(), right.getQualifierLength()); } + public int compareColumns(byte[] leftCol, int lColOffset, int lColLength, byte[] rightCol, + int rColOffset, int rColLength) { + return Bytes.compareTo(leftCol, lColOffset, lColLength, rightCol, rColOffset, rColLength); + } + public int compareFlatKey(Cell left, Cell right) { int compare = compareRows(left, right); if (compare != 0) { @@ -178,32 +130,169 @@ public class CellComparator implements Comparator, Serializable { return compareWithoutRow(left, right); } + public int compareFlatKey(byte[] left, byte[] right) { + return compareFlatKey(left, 0, left.length, right, 0, right.length); + } + + /** + * Compares left to right assuming that left,loffset,llength and right,roffset,rlength are + * full KVs laid out in a flat byte[]s. 
+ * @param left + * @param loffset + * @param llength + * @param right + * @param roffset + * @param rlength + * @return 0 if equal, <0 if left smaller, >0 if right smaller + */ + // We will get methods that deals with BB here once BB backed cells come + public int compareFlatKey(byte[] left, int loffset, int llength, byte[] right, int roffset, + int rlength) { + // Compare row + short lrowlength = Bytes.toShort(left, loffset); + short rrowlength = Bytes.toShort(right, roffset); + int compare = compareRows(left, loffset + Bytes.SIZEOF_SHORT, lrowlength, right, roffset + + Bytes.SIZEOF_SHORT, rrowlength); + if (compare != 0) { + return compare; + } + + // Compare the rest of the two KVs without making any assumptions about + // the common prefix. This function will not compare rows anyway, so we + // don't need to tell it that the common prefix includes the row. + return compareWithoutRow(0, left, loffset, llength, right, roffset, rlength, rrowlength); + } + + /** + * Compare columnFamily, qualifier, timestamp, and key type (everything + * except the row). This method is used both in the normal comparator and + * the "same-prefix" comparator. Note that we are assuming that row portions + * of both KVs have already been parsed and found identical, and we don't + * validate that assumption here. + * @param commonPrefix + * the length of the common prefix of the two key-values being + * compared, including row length and row + */ + public int compareWithoutRow(int commonPrefix, byte[] left, int loffset, int llength, + byte[] right, int roffset, int rlength, short rowlength) { + /*** + * KeyValue Format and commonLength: + * |_keyLen_|_valLen_|_rowLen_|_rowKey_|_famiLen_|_fami_|_Quali_|.... 
+ * ------------------|-------commonLength--------|-------------- + */ + int commonLength = KeyValue.ROW_LENGTH_SIZE + KeyValue.FAMILY_LENGTH_SIZE + rowlength; + + // commonLength + TIMESTAMP_TYPE_SIZE + int commonLengthWithTSAndType = KeyValue.TIMESTAMP_TYPE_SIZE + commonLength; + // ColumnFamily + Qualifier length. + int lcolumnlength = llength - commonLengthWithTSAndType; + int rcolumnlength = rlength - commonLengthWithTSAndType; + + byte ltype = left[loffset + (llength - 1)]; + byte rtype = right[roffset + (rlength - 1)]; + + // If the column is not specified, the "minimum" key type appears the + // latest in the sorted order, regardless of the timestamp. This is used + // for specifying the last key/value in a given row, because there is no + // "lexicographically last column" (it would be infinitely long). The + // "maximum" key type does not need this behavior. + if (lcolumnlength == 0 && ltype == Type.Minimum.getCode()) { + // left is "bigger", i.e. it appears later in the sorted order + return 1; + } + if (rcolumnlength == 0 && rtype == Type.Minimum.getCode()) { + return -1; + } + + int lfamilyoffset = commonLength + loffset; + int rfamilyoffset = commonLength + roffset; + + // Column family length. + int lfamilylength = left[lfamilyoffset - 1]; + int rfamilylength = right[rfamilyoffset - 1]; + // If left family size is not equal to right family size, we need not + // compare the qualifiers. + boolean sameFamilySize = (lfamilylength == rfamilylength); + int common = 0; + if (commonPrefix > 0) { + common = Math.max(0, commonPrefix - commonLength); + if (!sameFamilySize) { + // Common should not be larger than Math.min(lfamilylength, + // rfamilylength). + common = Math.min(common, Math.min(lfamilylength, rfamilylength)); + } else { + common = Math.min(common, Math.min(lcolumnlength, rcolumnlength)); + } + } + if (!sameFamilySize) { + // comparing column family is enough. 
+ return Bytes.compareTo(left, lfamilyoffset + common, lfamilylength - common, right, + rfamilyoffset + common, rfamilylength - common); + } + // Compare family & qualifier together. + final int comparison = Bytes.compareTo(left, lfamilyoffset + common, lcolumnlength - common, + right, rfamilyoffset + common, rcolumnlength - common); + if (comparison != 0) { + return comparison; + } + + // // + // Next compare timestamps. + long ltimestamp = Bytes.toLong(left, loffset + (llength - KeyValue.TIMESTAMP_TYPE_SIZE)); + long rtimestamp = Bytes.toLong(right, roffset + (rlength - KeyValue.TIMESTAMP_TYPE_SIZE)); + int compare = compareTimestamps(ltimestamp, rtimestamp); + if (compare != 0) { + return compare; + } + + // Compare types. Let the delete types sort ahead of puts; i.e. types + // of higher numbers sort before those of lesser numbers. Maximum (255) + // appears ahead of everything, and minimum (0) appears after + // everything. + return (0xff & rtype) - (0xff & ltype); + } + /** * Do not use comparing rows from hbase:meta. Meta table Cells have schema (table,startrow,hash) * so can't be treated as plain byte arrays as this method does. */ - public static int compareRows(final Cell left, final Cell right) { + public int compareRows(final Cell left, final Cell right) { return Bytes.compareTo(left.getRowArray(), left.getRowOffset(), left.getRowLength(), right.getRowArray(), right.getRowOffset(), right.getRowLength()); } + public int compareRows(Cell left, int loffset, int llength, Cell right, int roffset, + int rlength) { + // TODO : for BB based cells all the hasArray based checks would happen + // here. 
But we may have + // to end up in multiple APIs accepting byte[] and BBs + return Bytes.compareTo(left.getRowArray(), loffset, llength, right.getRowArray(), roffset, + rlength); + } + + public int compareRows(Cell left, int loffset, int llength, byte[] right, int roffset, + int rlength) { + // TODO : for BB based cells all the hasArray based checks would happen + // here. But we may have + // to end up in multiple APIs accepting byte[] and BBs + return Bytes.compareTo(left.getRowArray(), loffset, llength, right, roffset, rlength); + } /** * Do not use comparing rows from hbase:meta. Meta table Cells have schema (table,startrow,hash) * so can't be treated as plain byte arrays as this method does. */ - public static int compareRows(byte[] left, int loffset, int llength, byte[] right, int roffset, + public int compareRows(byte[] left, int loffset, int llength, byte[] right, int roffset, int rlength) { return Bytes.compareTo(left, loffset, llength, right, roffset, rlength); } - public static int compareWithoutRow(final Cell leftCell, final Cell rightCell) { + public int compareWithoutRow(final Cell leftCell, final Cell rightCell) { // If the column is not specified, the "minimum" key type appears the // latest in the sorted order, regardless of the timestamp. This is used // for specifying the last key/value in a given row, because there is no // "lexicographically last column" (it would be infinitely long). The // "maximum" key type does not need this behavior. // Copied from KeyValue. This is bad in that we can't do memcmp w/ special rules like this. - // TODO if (leftCell.getFamilyLength() + leftCell.getQualifierLength() == 0 && leftCell.getTypeByte() == Type.Minimum.getCode()) { // left is "bigger", i.e. 
it appears later in the sorted order @@ -216,15 +305,12 @@ public class CellComparator implements Comparator, Serializable { boolean sameFamilySize = (leftCell.getFamilyLength() == rightCell.getFamilyLength()); if (!sameFamilySize) { // comparing column family is enough. - - return Bytes.compareTo(leftCell.getFamilyArray(), leftCell.getFamilyOffset(), - leftCell.getFamilyLength(), rightCell.getFamilyArray(), rightCell.getFamilyOffset(), - rightCell.getFamilyLength()); + return compareFamilies(leftCell, rightCell); } int diff = compareColumns(leftCell, rightCell); if (diff != 0) return diff; - diff = compareTimestamps(leftCell, rightCell); + diff = CellUtil.compareTimestamps(leftCell, rightCell); if (diff != 0) return diff; // Compare types. Let the delete types sort ahead of puts; i.e. types @@ -234,12 +320,6 @@ public class CellComparator implements Comparator, Serializable { return (0xff & rightCell.getTypeByte()) - (0xff & leftCell.getTypeByte()); } - public static int compareTimestamps(final Cell left, final Cell right) { - long ltimestamp = left.getTimestamp(); - long rtimestamp = right.getTimestamp(); - return compareTimestamps(ltimestamp, rtimestamp); - } - /********************* hashCode ************************/ /** @@ -255,6 +335,61 @@ public class CellComparator implements Comparator, Serializable { return hash; } + public int compareTimestamps(final Cell left, final Cell right) { + return CellUtil.compareTimestamps(left, right); + } + + public int compareTimestamps(final long ts1, final long ts2) { + return CellUtil.compareTimestamps(ts1, ts2); + } + + + // compare a key against row/fam/qual/ts/type + public int compareKey(Cell cell, byte[] row, int roff, int rlen, byte[] fam, int foff, int flen, + byte[] col, int coff, int clen, long ts, byte type) { + + int compare = compareRows(cell, cell.getRowOffset(), cell.getRowLength(), row, roff, + rlen); + if (compare != 0) { + return compare; + } + // If the column is not specified, the "minimum" key type 
appears the + // latest in the sorted order, regardless of the timestamp. This is used + // for specifying the last key/value in a given row, because there is no + // "lexicographically last column" (it would be infinitely long). The + // "maximum" key type does not need this behavior. + if (cell.getFamilyLength() + cell.getQualifierLength() == 0 + && cell.getTypeByte() == Type.Minimum.getCode()) { + // left is "bigger", i.e. it appears later in the sorted order + return 1; + } + if (flen + clen == 0 && type == Type.Minimum.getCode()) { + return -1; + } + + compare = compareFamilies(cell.getFamilyArray(), cell.getFamilyOffset(), + cell.getFamilyLength(), fam, foff, flen); + if (compare != 0) { + return compare; + } + compare = compareColumns(cell.getQualifierArray(), cell.getQualifierOffset(), + cell.getQualifierLength(), col, coff, clen); + if (compare != 0) { + return compare; + } + // Next compare timestamps. + compare = compareTimestamps(cell.getTimestamp(), ts); + if (compare != 0) { + return compare; + } + + // Compare types. Let the delete types sort ahead of puts; i.e. types + // of higher numbers sort before those of lesser numbers. Maximum (255) + // appears ahead of everything, and minimum (0) appears after + // everything. + return (0xff & type) - (0xff & cell.getTypeByte()); + } + /** * Returns a hash code that is always the same for two Cells having a matching * equals(..) result. 
Note : Ignore mvcc while calculating the hashcode @@ -287,34 +422,20 @@ public class CellComparator implements Comparator, Serializable { return hash; } - - /******************** lengths *************************/ - - public static boolean areKeyLengthsEqual(Cell a, Cell b) { - return a.getRowLength() == b.getRowLength() - && a.getFamilyLength() == b.getFamilyLength() - && a.getQualifierLength() == b.getQualifierLength(); - } - - public static boolean areRowLengthsEqual(Cell a, Cell b) { - return a.getRowLength() == b.getRowLength(); - } - - /*********************common prefixes*************************/ - private static int compare(byte[] left, int leftOffset, int leftLength, byte[] right, + public int compare(byte[] left, int leftOffset, int leftLength, byte[] right, int rightOffset, int rightLength) { - return Bytes.compareTo(left, leftOffset, leftLength, right, rightOffset, rightLength); + return CellUtil.compare(left, leftOffset, leftLength, right, rightOffset, rightLength); } - public static int compareCommonRowPrefix(Cell left, Cell right, int rowCommonPrefix) { + public int compareCommonRowPrefix(Cell left, Cell right, int rowCommonPrefix) { return compare(left.getRowArray(), left.getRowOffset() + rowCommonPrefix, left.getRowLength() - rowCommonPrefix, right.getRowArray(), right.getRowOffset() + rowCommonPrefix, right.getRowLength() - rowCommonPrefix); } - public static int compareCommonFamilyPrefix(Cell left, Cell right, + public int compareCommonFamilyPrefix(Cell left, Cell right, int familyCommonPrefix) { return compare(left.getFamilyArray(), left.getFamilyOffset() + familyCommonPrefix, left.getFamilyLength() - familyCommonPrefix, right.getFamilyArray(), @@ -322,7 +443,7 @@ public class CellComparator implements Comparator, Serializable { right.getFamilyLength() - familyCommonPrefix); } - public static int compareCommonQualifierPrefix(Cell left, Cell right, + public int compareCommonQualifierPrefix(Cell left, Cell right, int qualCommonPrefix) { return 
compare(left.getQualifierArray(), left.getQualifierOffset() + qualCommonPrefix, left.getQualifierLength() - qualCommonPrefix, right.getQualifierArray(), @@ -330,43 +451,67 @@ public class CellComparator implements Comparator, Serializable { - qualCommonPrefix); } - /***************** special cases ****************************/ /** - * special case for KeyValue.equals + * Compares the row of two keyvalues for equality + * @param left + * @param right + * @return True if rows match. */ - public static boolean equalsIgnoreMvccVersion(Cell a, Cell b){ - return 0 == compareStaticIgnoreMvccVersion(a, b); + public boolean matchingRows(final Cell left, final Cell right) { + short lrowlength = left.getRowLength(); + short rrowlength = right.getRowLength(); + return matchingRows(left, lrowlength, right, rrowlength); } - private static int compareStaticIgnoreMvccVersion(Cell a, Cell b) { - // row - int c = compareRows(a, b); - if (c != 0) return c; + /** + * @param left + * @param lrowlength + * @param right + * @param rrowlength + * @return True if rows match. + */ + public boolean matchingRows(final Cell left, final short lrowlength, + final Cell right, final short rrowlength) { + return lrowlength == rrowlength && + matchingRows(left.getRowArray(), left.getRowOffset(), lrowlength, + right.getRowArray(), right.getRowOffset(), rrowlength); + } - // family - c = compareColumns(a, b); - if (c != 0) return c; + /** + * Compare rows. Just calls Bytes.equals, but it's good to have this encapsulated. + * @param left Left row array. + * @param loffset Left row offset. + * @param llength Left row length. + * @param right Right row array. + * @param roffset Right row offset. + * @param rlength Right row length. + * @return Whether rows are the same row. 
+ */ + public boolean matchingRows(final byte [] left, final int loffset, final int llength, + final byte [] right, final int roffset, final int rlength) { + return Bytes.equals(left, loffset, llength, right, roffset, rlength); + } - // timestamp: later sorts first - c = compareTimestamps(a, b); - if (c != 0) return c; + /** + * Compares the row and column of two keyvalues for equality + * @param left + * @param right + * @return True if same row and column. + */ + public boolean matchingRowColumn(final Cell left, + final Cell right) { + short lrowlength = left.getRowLength(); + short rrowlength = right.getRowLength(); - //type - c = (0xff & b.getTypeByte()) - (0xff & a.getTypeByte()); - return c; - } + if ((lrowlength + left.getFamilyLength() + left.getQualifierLength()) != (rrowlength + + right.getFamilyLength() + right.getQualifierLength())) { + return false; + } - private static int compareTimestamps(final long ltimestamp, final long rtimestamp) { - // The below older timestamps sorting ahead of newer timestamps looks - // wrong but it is intentional. This way, newer timestamps are first - // found when we iterate over a memstore and newer versions are the - // first we trip over when reading from a store file. - if (ltimestamp < rtimestamp) { - return 1; - } else if (ltimestamp > rtimestamp) { - return -1; + if (!matchingRows(left, lrowlength, right, rrowlength)) { + return false; } - return 0; + return 0 == compareColumns(left, right); } /** @@ -389,7 +534,7 @@ public class CellComparator implements Comparator, Serializable { * @param right * @return A cell that sorts between left and right. */ - public static Cell getMidpoint(final KeyValue.KVComparator comparator, final Cell left, + public static Cell getMidpoint(final CellComparator comparator, final Cell left, final Cell right) { // TODO: Redo so only a single pass over the arrays rather than one to compare and then a // second composing midpoint. 
@@ -402,10 +547,10 @@ public class CellComparator implements Comparator, Serializable { // If Cells from meta table, don't mess around. meta table Cells have schema // (table,startrow,hash) so can't be treated as plain byte arrays. Just skip out without // trying to do this optimization. - if (comparator != null && comparator instanceof KeyValue.MetaComparator) { + if (comparator != null && comparator instanceof MetaCellComparator) { return right; } - int diff = compareRows(left, right); + int diff = comparator.compareRows(left, right); if (diff > 0) { throw new IllegalArgumentException("Left row sorts after right row; left=" + CellUtil.getCellKeyAsString(left) + ", right=" + CellUtil.getCellKeyAsString(right)); @@ -420,7 +565,7 @@ public class CellComparator implements Comparator, Serializable { return CellUtil.createCell(midRow); } // Rows are same. Compare on families. - diff = compareFamilies(left, right); + diff = comparator.compareFamilies(left, right); if (diff > 0) { throw new IllegalArgumentException("Left family sorts after right family; left=" + CellUtil.getCellKeyAsString(left) + ", right=" + CellUtil.getCellKeyAsString(right)); @@ -437,7 +582,7 @@ public class CellComparator implements Comparator, Serializable { HConstants.EMPTY_BYTE_ARRAY.length); } // Families are same. Compare on qualifiers. - diff = compareQualifiers(left, right); + diff = comparator.compareQualifiers(left, right); if (diff > 0) { throw new IllegalArgumentException("Left qualifier sorts after right qualifier; left=" + CellUtil.getCellKeyAsString(left) + ", right=" + CellUtil.getCellKeyAsString(right)); @@ -495,4 +640,244 @@ public class CellComparator implements Comparator, Serializable { } return minimumMidpointArray; } + + /** + * The HFileV2 file format's trailer contains this class name. We reinterpret this and + * instantiate the appropriate comparator. + * TODO: With V3 consider removing this. 
+   * @return legacy class name for FixedFileTrailer#comparatorClassName
+   */
+  public String getLegacyKeyComparatorName() {
+    // The path of this comparator is weird. Do we really need to change this?
+    // There is a TODO above to get rid of this
+    return "org.apache.hadoop.hbase.KeyValue$KeyComparator";
+  }
+
+  public byte[] calcIndexKey(byte[] lastKeyOfPreviousBlock, byte[] firstKeyInBlock) {
+    byte[] fakeKey = getShortMidpointKey(lastKeyOfPreviousBlock, firstKeyInBlock);
+    if (compareFlatKey(fakeKey, firstKeyInBlock) > 0) {
+      LOG.error("Unexpected getShortMidpointKey result, fakeKey:"
+          + Bytes.toStringBinary(fakeKey) + ", firstKeyInBlock:"
+          + Bytes.toStringBinary(firstKeyInBlock));
+      return firstKeyInBlock;
+    }
+    if (lastKeyOfPreviousBlock != null && compareFlatKey(lastKeyOfPreviousBlock, fakeKey) >= 0) {
+      LOG.error("Unexpected getShortMidpointKey result, lastKeyOfPreviousBlock:"
+          + Bytes.toStringBinary(lastKeyOfPreviousBlock) + ", fakeKey:"
+          + Bytes.toStringBinary(fakeKey));
+      return firstKeyInBlock;
+    }
+    return fakeKey;
+  }
+
+  /**
+   * This is a HFile block index key optimization.
+   * @param leftKey
+   * @param rightKey
+   * @return 0 if equal, <0 if left smaller, >0 if right smaller
+   * @deprecated Since 0.99.2; Use
+   *             {@link CellComparator#getMidpoint(CellComparator, Cell, Cell)} instead
+   */
+  @Deprecated
+  public byte[] getShortMidpointKey(final byte[] leftKey, final byte[] rightKey) {
+    if (rightKey == null) {
+      throw new IllegalArgumentException("rightKey can not be null");
+    }
+    if (leftKey == null) {
+      return Arrays.copyOf(rightKey, rightKey.length);
+    }
+    if (compareFlatKey(leftKey, rightKey) >= 0) {
+      throw new IllegalArgumentException("Unexpected input, leftKey:" + Bytes.toString(leftKey)
+          + ", rightKey:" + Bytes.toString(rightKey));
+    }
+
+    short leftRowLength = Bytes.toShort(leftKey, 0);
+    short rightRowLength = Bytes.toShort(rightKey, 0);
+    int leftCommonLength = KeyValue.ROW_LENGTH_SIZE + KeyValue.FAMILY_LENGTH_SIZE + leftRowLength;
+    int rightCommonLength = KeyValue.ROW_LENGTH_SIZE + KeyValue.FAMILY_LENGTH_SIZE + rightRowLength;
+    int leftCommonLengthWithTSAndType = KeyValue.TIMESTAMP_TYPE_SIZE + leftCommonLength;
+    int rightCommonLengthWithTSAndType = KeyValue.TIMESTAMP_TYPE_SIZE + rightCommonLength;
+    int leftColumnLength = leftKey.length - leftCommonLengthWithTSAndType;
+    int rightColumnLength = rightKey.length - rightCommonLengthWithTSAndType;
+    // rows are equal
+    if (leftRowLength == rightRowLength
+        && compareRows(leftKey, KeyValue.ROW_LENGTH_SIZE, leftRowLength, rightKey,
+            KeyValue.ROW_LENGTH_SIZE, rightRowLength) == 0) {
+      // Compare family & qualifier together.
+ int comparison = Bytes.compareTo(leftKey, leftCommonLength, leftColumnLength, rightKey, + rightCommonLength, rightColumnLength); + // same with "row + family + qualifier", return rightKey directly + if (comparison == 0) { + return Arrays.copyOf(rightKey, rightKey.length); + } + // "family + qualifier" are different, generate a faked key per rightKey + byte[] newKey = Arrays.copyOf(rightKey, rightKey.length); + Bytes.putLong(newKey, rightKey.length - KeyValue.TIMESTAMP_TYPE_SIZE, + HConstants.LATEST_TIMESTAMP); + Bytes.putByte(newKey, rightKey.length - KeyValue.TYPE_SIZE, Type.Maximum.getCode()); + return newKey; + } + // rows are different + short minLength = leftRowLength < rightRowLength ? leftRowLength : rightRowLength; + short diffIdx = 0; + while (diffIdx < minLength + && leftKey[KeyValue.ROW_LENGTH_SIZE + diffIdx] == rightKey[KeyValue.ROW_LENGTH_SIZE + + diffIdx]) { + diffIdx++; + } + byte[] newRowKey = null; + if (diffIdx >= minLength) { + // leftKey's row is prefix of rightKey's. + newRowKey = new byte[diffIdx + 1]; + System.arraycopy(rightKey, KeyValue.ROW_LENGTH_SIZE, newRowKey, 0, diffIdx + 1); + } else { + int diffByte = leftKey[KeyValue.ROW_LENGTH_SIZE + diffIdx]; + if ((0xff & diffByte) < 0xff && (diffByte + 1) < + (rightKey[KeyValue.ROW_LENGTH_SIZE + diffIdx] & 0xff)) { + newRowKey = new byte[diffIdx + 1]; + System.arraycopy(leftKey, KeyValue.ROW_LENGTH_SIZE, newRowKey, 0, diffIdx); + newRowKey[diffIdx] = (byte) (diffByte + 1); + } else { + newRowKey = new byte[diffIdx + 1]; + System.arraycopy(rightKey, KeyValue.ROW_LENGTH_SIZE, newRowKey, 0, diffIdx + 1); + } + } + return new KeyValue(newRowKey, null, null, HConstants.LATEST_TIMESTAMP, Type.Maximum) + .getKey(); + } + + @Override + protected Object clone() throws CloneNotSupportedException { + super.clone(); + return new CellComparator(); + } + + /** + * A {@link CellComparator} for hbase:meta catalog table + * {@link KeyValue}s. 
+ */ + public static class MetaCellComparator extends CellComparator { + /** + * Compare key portion of a {@link KeyValue} for keys in + * hbase:meta table. + */ + @Override + public int compare(final Cell left, final Cell right) { + int c = compareRowKey(left, right); + if (c != 0) { + return c; + } + return compareWithoutRow(left, right); + } + + @Override + public int compareOnlyKeyPortion(Cell left, Cell right) { + return compare(left, right); + } + + @Override + public int compareRows(Cell left, int loffset, int llength, Cell right, int roffset, + int rlength) { + // TODO : for BB based cells all the hasArray based checks would happen + // here. But we may have + // to end up in multiple APIs accepting byte[] and BBs + return compareRows(left.getRowArray(), loffset, llength, right.getRowArray(), roffset, + rlength); + } + + @Override + public int compareRows(Cell left, int loffset, int llength, byte[] right, int roffset, + int rlength) { + // TODO : for BB based cells all the hasArray based checks would happen here. But we may have + // to end up in multiple APIs accepting byte[] and BBs + return compareRows(left.getRowArray(), loffset, llength, right, roffset, + rlength); + } + + @Override + public int compareRows(byte[] left, int loffset, int llength, byte[] right, int roffset, + int rlength) { + int leftDelimiter = CellUtil.getDelimiter(left, loffset, llength, HConstants.DELIMITER); + int rightDelimiter = CellUtil.getDelimiter(right, roffset, rlength, HConstants.DELIMITER); + // Compare up to the delimiter + int lpart = (leftDelimiter < 0 ? llength : leftDelimiter - loffset); + int rpart = (rightDelimiter < 0 ? 
rlength : rightDelimiter - roffset); + int result = Bytes.compareTo(left, loffset, lpart, right, roffset, rpart); + if (result != 0) { + return result; + } else { + if (leftDelimiter < 0 && rightDelimiter >= 0) { + return -1; + } else if (rightDelimiter < 0 && leftDelimiter >= 0) { + return 1; + } else if (leftDelimiter < 0 && rightDelimiter < 0) { + return 0; + } + } + // Compare middle bit of the row. + // Move past delimiter + leftDelimiter++; + rightDelimiter++; + int leftFarDelimiter = CellUtil.getDelimiterInReverse(left, leftDelimiter, llength + - (leftDelimiter - loffset), HConstants.DELIMITER); + int rightFarDelimiter = CellUtil.getDelimiterInReverse(right, rightDelimiter, rlength + - (rightDelimiter - roffset), HConstants.DELIMITER); + // Now compare middlesection of row. + lpart = (leftFarDelimiter < 0 ? llength + loffset : leftFarDelimiter) - leftDelimiter; + rpart = (rightFarDelimiter < 0 ? rlength + roffset : rightFarDelimiter) - rightDelimiter; + result = super.compareRows(left, leftDelimiter, lpart, right, rightDelimiter, rpart); + if (result != 0) { + return result; + } else { + if (leftDelimiter < 0 && rightDelimiter >= 0) { + return -1; + } else if (rightDelimiter < 0 && leftDelimiter >= 0) { + return 1; + } else if (leftDelimiter < 0 && rightDelimiter < 0) { + return 0; + } + } + // Compare last part of row, the rowid. + leftFarDelimiter++; + rightFarDelimiter++; + result = Bytes.compareTo(left, leftFarDelimiter, llength - (leftFarDelimiter - loffset), + right, rightFarDelimiter, rlength - (rightFarDelimiter - roffset)); + return result; + } + + /** + * Override the row key comparison to parse and compare the meta row key + * parts. 
+ */ + protected int compareRowKey(final Cell l, final Cell r) { + int loffset = l.getRowOffset(); + int llength = l.getRowLength(); + int roffset = r.getRowOffset(); + int rlength = r.getRowLength(); + return compareRows(l, loffset, llength, r, roffset, rlength); + } + + /** + * Don't do any fancy Block Index splitting tricks. + */ + public byte[] getShortMidpointKey(final byte[] leftKey, final byte[] rightKey) { + return Arrays.copyOf(rightKey, rightKey.length); + } + + /** + * The HFileV2 file format's trailer contains this class name. We + * reinterpret this and instantiate the appropriate comparator. TODO: With + * V3 consider removing this. + * + * @return legacy class name for FileFileTrailer#comparatorClassName + */ + public String getLegacyKeyComparatorName() { + return "org.apache.hadoop.hbase.KeyValue$MetaKeyComparator"; + } + + @Override + protected Object clone() throws CloneNotSupportedException { + return new MetaCellComparator(); + } + } } \ No newline at end of file diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java index bce3957..6092c63 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java @@ -902,4 +902,243 @@ public final class CellUtil { return builder.toString(); } + + /***************** special cases ****************************/ + + /** + * special case for KeyValue.equals + */ + public static boolean equalsIgnoreMvccVersion(Cell a, Cell b) { + return 0 == compareStaticIgnoreMvccVersion(a, b); + } + + public static int compareStaticIgnoreMvccVersion(Cell a, Cell b) { + // row + int c = Bytes.compareTo(a.getRowArray(), a.getRowOffset(), a.getRowLength(), b.getRowArray(), + b.getRowOffset(), b.getRowLength()); + if (c != 0) + return c; + + // family + c = compareColumns(a, b); + if (c != 0) + return c; + + // timestamp: later sorts first + c = 
compareTimestamps(a, b); + if (c != 0) + return c; + + // type + c = (0xff & b.getTypeByte()) - (0xff & a.getTypeByte()); + return c; + } + + public static int compareColumns(final Cell left, final Cell right) { + int lfoffset = left.getFamilyOffset(); + int rfoffset = right.getFamilyOffset(); + int lclength = left.getQualifierLength(); + int rclength = right.getQualifierLength(); + int lfamilylength = left.getFamilyLength(); + int rfamilylength = right.getFamilyLength(); + int diff = compare(left.getFamilyArray(), lfoffset, lfamilylength, right.getFamilyArray(), + rfoffset, rfamilylength); + if (diff != 0) { + return diff; + } else { + return compare(left.getQualifierArray(), left.getQualifierOffset(), lclength, + right.getQualifierArray(), right.getQualifierOffset(), rclength); + } + } + + public static int compare(byte[] left, int leftOffset, int leftLength, byte[] right, + int rightOffset, int rightLength) { + return Bytes.compareTo(left, leftOffset, leftLength, right, rightOffset, rightLength); + } + + public static int compareTimestamps(final Cell left, final Cell right) { + long ltimestamp = left.getTimestamp(); + long rtimestamp = right.getTimestamp(); + return compareTimestamps(ltimestamp, rtimestamp); + } + + public static int compareTimestamps(final long ltimestamp, final long rtimestamp) { + // The below older timestamps sorting ahead of newer timestamps looks + // wrong but it is intentional. This way, newer timestamps are first + // found when we iterate over a memstore and newer versions are the + // first we trip over when reading from a store file. 
+ if (ltimestamp < rtimestamp) { + return 1; + } else if (ltimestamp > rtimestamp) { + return -1; + } + return 0; + } + + /** + * Create a new KeyValue by copying existing cell and adding new tags + * + * @param c + * @param newTags + * @return a new KeyValue instance with new tags + */ + public static Cell cloneAndAddTags(Cell c, List newTags) { + List existingTags = null; + if (c.getTagsLength() > 0) { + existingTags = Tag.asList(c.getTagsArray(), c.getTagsOffset(), c.getTagsLength()); + existingTags.addAll(newTags); + } else { + existingTags = newTags; + } + return new KeyValue(c.getRowArray(), c.getRowOffset(), c.getRowLength(), c.getFamilyArray(), + c.getFamilyOffset(), c.getFamilyLength(), c.getQualifierArray(), c.getQualifierOffset(), + c.getQualifierLength(), c.getTimestamp(), Type.codeToType(c.getTypeByte()), + c.getValueArray(), c.getValueOffset(), c.getValueLength(), existingTags); + } + + /******************** lengths *************************/ + + public static boolean areKeyLengthsEqual(Cell a, Cell b) { + return a.getRowLength() == b.getRowLength() && a.getFamilyLength() == b.getFamilyLength() + && a.getQualifierLength() == b.getQualifierLength(); + } + + public static boolean areRowLengthsEqual(Cell a, Cell b) { + return a.getRowLength() == b.getRowLength(); + } + + /********************* hashCode ************************/ + + /** + * Returns a hash code that is always the same for two Cells having a matching + * equals(..) result. Currently does not guard against nulls, but it could if + * necessary. 
+ */ + public static int hashCode(Cell cell) { + if (cell == null) {// return 0 for empty Cell + return 0; + } + + // pre-calculate the 3 hashes made of byte ranges + int rowHash = Bytes.hashCode(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength()); + int familyHash = Bytes.hashCode(cell.getFamilyArray(), cell.getFamilyOffset(), + cell.getFamilyLength()); + int qualifierHash = Bytes.hashCode(cell.getQualifierArray(), cell.getQualifierOffset(), + cell.getQualifierLength()); + + // combine the 6 sub-hashes + int hash = 31 * rowHash + familyHash; + hash = 31 * hash + qualifierHash; + hash = 31 * hash + (int) cell.getTimestamp(); + hash = 31 * hash + cell.getTypeByte(); + hash = 31 * hash + (int) cell.getMvccVersion(); + return hash; + } + + public static int findCommonPrefixInRowPart(Cell left, Cell right, int rowCommonPrefix) { + return findCommonPrefix(left.getRowArray(), right.getRowArray(), left.getRowLength() + - rowCommonPrefix, right.getRowLength() - rowCommonPrefix, left.getRowOffset() + + rowCommonPrefix, right.getRowOffset() + rowCommonPrefix); + } + + public static int findCommonPrefixInFamilyPart(Cell left, Cell right, int familyCommonPrefix) { + return findCommonPrefix(left.getFamilyArray(), right.getFamilyArray(), left.getFamilyLength() + - familyCommonPrefix, right.getFamilyLength() - familyCommonPrefix, left.getFamilyOffset() + + familyCommonPrefix, right.getFamilyOffset() + familyCommonPrefix); + } + + public static int findCommonPrefixInQualifierPart(Cell left, Cell right, + int qualifierCommonPrefix) { + return findCommonPrefix(left.getQualifierArray(), right.getQualifierArray(), + left.getQualifierLength() - qualifierCommonPrefix, right.getQualifierLength() + - qualifierCommonPrefix, left.getQualifierOffset() + qualifierCommonPrefix, + right.getQualifierOffset() + qualifierCommonPrefix); + } + + private static int findCommonPrefix(byte[] left, byte[] right, int leftLength, int rightLength, + int leftOffset, int rightOffset) { + int length 
= Math.min(leftLength, rightLength); + int result = 0; + + while (result < length && left[leftOffset + result] == right[rightOffset + result]) { + result++; + } + return result; + } + + /**************** equals ****************************/ + + public static boolean equals(Cell a, Cell b) { + return equalsRow(a, b) && equalsFamily(a, b) && equalsQualifier(a, b) && equalsTimestamp(a, b) + && equalsType(a, b); + } + + public static boolean equalsRow(Cell a, Cell b) { + return Bytes.equals(a.getRowArray(), a.getRowOffset(), a.getRowLength(), b.getRowArray(), + b.getRowOffset(), b.getRowLength()); + } + + public static boolean equalsFamily(Cell a, Cell b) { + return Bytes.equals(a.getFamilyArray(), a.getFamilyOffset(), a.getFamilyLength(), + b.getFamilyArray(), b.getFamilyOffset(), b.getFamilyLength()); + } + + public static boolean equalsQualifier(Cell a, Cell b) { + return Bytes.equals(a.getQualifierArray(), a.getQualifierOffset(), a.getQualifierLength(), + b.getQualifierArray(), b.getQualifierOffset(), b.getQualifierLength()); + } + + public static boolean equalsTimestamp(Cell a, Cell b) { + return a.getTimestamp() == b.getTimestamp(); + } + + public static boolean equalsType(Cell a, Cell b) { + return a.getTypeByte() == b.getTypeByte(); + } + + /** + * Find index of passed delimiter walking from end of buffer backwards. + * + * @param b + * @param delimiter + * @return Index of delimiter + */ + // Move to Bytes.java + public static int getDelimiterInReverse(final byte[] b, final int offset, final int length, + final int delimiter) { + if (b == null) { + throw new IllegalArgumentException("Passed buffer is null"); + } + int result = -1; + for (int i = (offset + length) - 1; i >= offset; i--) { + if (b[i] == delimiter) { + result = i; + break; + } + } + return result; + } + + /** + * @param b + * @param delimiter + * @return Index of delimiter having started from start of b + * moving rightward. 
+ */ + // Move to Bytes.java + public static int getDelimiter(final byte[] b, int offset, final int length, + final int delimiter) { + if (b == null) { + throw new IllegalArgumentException("Passed buffer is null"); + } + int result = -1; + for (int i = offset; i < length + offset; i++) { + if (b[i] == delimiter) { + result = i; + break; + } + } + return result; + } + } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java index 7de1f54..9c8cc38 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java @@ -21,10 +21,7 @@ package org.apache.hadoop.hbase; import static org.apache.hadoop.hbase.util.Bytes.len; -import java.io.DataInput; -import java.io.DataOutput; import java.io.IOException; -import java.io.InputStream; import java.io.OutputStream; import java.nio.ByteBuffer; import java.util.ArrayList; @@ -36,13 +33,12 @@ import java.util.Map; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.CellComparator.MetaCellComparator; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.io.HeapSize; import org.apache.hadoop.hbase.io.util.StreamUtils; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.ClassSize; -import org.apache.hadoop.io.IOUtils; -import org.apache.hadoop.io.RawComparator; import com.google.common.annotations.VisibleForTesting; @@ -96,17 +92,17 @@ public class KeyValue implements Cell, HeapSize, Cloneable, SettableSequenceId, * Comparator for plain key/values; i.e. non-catalog table key/values. Works on Key portion * of KeyValue only. 
*/ - public static final KVComparator COMPARATOR = new KVComparator(); + public static final CellComparator COMPARATOR = new CellComparator(); /** - * A {@link KVComparator} for hbase:meta catalog table + * A {@link CellComparator} for hbase:meta catalog table * {@link KeyValue}s. */ - public static final KVComparator META_COMPARATOR = new MetaComparator(); + public static final CellComparator META_COMPARATOR = new MetaCellComparator(); /** * Needed for Bloom Filters. */ - public static final KVComparator RAW_COMPARATOR = new RawBytesComparator(); + public static final CellComparator RAW_COMPARATOR = new RawBytesComparator(); /** Size of the key length field in bytes*/ public static final int KEY_LENGTH_SIZE = Bytes.SIZEOF_INT; @@ -1061,7 +1057,7 @@ public class KeyValue implements Cell, HeapSize, Cloneable, SettableSequenceId, if (!(other instanceof Cell)) { return false; } - return CellComparator.equals(this, (Cell)other); + return CellUtil.equals(this, (Cell)other); } /** @@ -1637,7 +1633,7 @@ public class KeyValue implements Cell, HeapSize, Cloneable, SettableSequenceId, * @return The parsed column. */ public static byte [][] parseColumn(byte [] c) { - final int index = getDelimiter(c, 0, c.length, COLUMN_FAMILY_DELIMITER); + final int index = CellUtil.getDelimiter(c, 0, c.length, COLUMN_FAMILY_DELIMITER); if (index == -1) { // If no delimiter, return array of size 1 return new byte [][] { c }; @@ -1670,866 +1666,12 @@ public class KeyValue implements Cell, HeapSize, Cloneable, SettableSequenceId, } /** - * @param b - * @param delimiter - * @return Index of delimiter having started from start of b - * moving rightward. 
- */ - public static int getDelimiter(final byte [] b, int offset, final int length, - final int delimiter) { - if (b == null) { - throw new IllegalArgumentException("Passed buffer is null"); - } - int result = -1; - for (int i = offset; i < length + offset; i++) { - if (b[i] == delimiter) { - result = i; - break; - } - } - return result; - } - - /** - * Find index of passed delimiter walking from end of buffer backwards. - * @param b - * @param delimiter - * @return Index of delimiter - */ - public static int getDelimiterInReverse(final byte [] b, final int offset, - final int length, final int delimiter) { - if (b == null) { - throw new IllegalArgumentException("Passed buffer is null"); - } - int result = -1; - for (int i = (offset + length) - 1; i >= offset; i--) { - if (b[i] == delimiter) { - result = i; - break; - } - } - return result; - } - - /** - * A {@link KVComparator} for hbase:meta catalog table - * {@link KeyValue}s. - */ - public static class MetaComparator extends KVComparator { - /** - * Compare key portion of a {@link KeyValue} for keys in hbase:meta - * table. - */ - @Override - public int compare(final Cell left, final Cell right) { - int c = compareRowKey(left, right); - if (c != 0) { - return c; - } - return CellComparator.compareWithoutRow(left, right); - } - - @Override - public int compareOnlyKeyPortion(Cell left, Cell right) { - return compare(left, right); - } - - @Override - public int compareRows(byte [] left, int loffset, int llength, - byte [] right, int roffset, int rlength) { - int leftDelimiter = getDelimiter(left, loffset, llength, - HConstants.DELIMITER); - int rightDelimiter = getDelimiter(right, roffset, rlength, - HConstants.DELIMITER); - // Compare up to the delimiter - int lpart = (leftDelimiter < 0 ? llength :leftDelimiter - loffset); - int rpart = (rightDelimiter < 0 ? 
rlength :rightDelimiter - roffset); - int result = Bytes.compareTo(left, loffset, lpart, right, roffset, rpart); - if (result != 0) { - return result; - } else { - if (leftDelimiter < 0 && rightDelimiter >= 0) { - return -1; - } else if (rightDelimiter < 0 && leftDelimiter >= 0) { - return 1; - } else if (leftDelimiter < 0 && rightDelimiter < 0) { - return 0; - } - } - // Compare middle bit of the row. - // Move past delimiter - leftDelimiter++; - rightDelimiter++; - int leftFarDelimiter = getDelimiterInReverse(left, leftDelimiter, - llength - (leftDelimiter - loffset), HConstants.DELIMITER); - int rightFarDelimiter = getDelimiterInReverse(right, - rightDelimiter, rlength - (rightDelimiter - roffset), - HConstants.DELIMITER); - // Now compare middlesection of row. - lpart = (leftFarDelimiter < 0 ? llength + loffset: leftFarDelimiter) - leftDelimiter; - rpart = (rightFarDelimiter < 0 ? rlength + roffset: rightFarDelimiter)- rightDelimiter; - result = super.compareRows(left, leftDelimiter, lpart, right, rightDelimiter, rpart); - if (result != 0) { - return result; - } else { - if (leftDelimiter < 0 && rightDelimiter >= 0) { - return -1; - } else if (rightDelimiter < 0 && leftDelimiter >= 0) { - return 1; - } else if (leftDelimiter < 0 && rightDelimiter < 0) { - return 0; - } - } - // Compare last part of row, the rowid. - leftFarDelimiter++; - rightFarDelimiter++; - result = Bytes.compareTo(left, leftFarDelimiter, llength - (leftFarDelimiter - loffset), - right, rightFarDelimiter, rlength - (rightFarDelimiter - roffset)); - return result; - } - - /** - * Don't do any fancy Block Index splitting tricks. - */ - @Override - public byte[] getShortMidpointKey(final byte[] leftKey, final byte[] rightKey) { - return Arrays.copyOf(rightKey, rightKey.length); - } - - /** - * The HFileV2 file format's trailer contains this class name. We reinterpret this and - * instantiate the appropriate comparator. - * TODO: With V3 consider removing this. 
- * @return legacy class name for FileFileTrailer#comparatorClassName - */ - @Override - public String getLegacyKeyComparatorName() { - return "org.apache.hadoop.hbase.KeyValue$MetaKeyComparator"; - } - - @Override - protected Object clone() throws CloneNotSupportedException { - return new MetaComparator(); - } - - /** - * Override the row key comparison to parse and compare the meta row key parts. - */ - @Override - protected int compareRowKey(final Cell l, final Cell r) { - byte[] left = l.getRowArray(); - int loffset = l.getRowOffset(); - int llength = l.getRowLength(); - byte[] right = r.getRowArray(); - int roffset = r.getRowOffset(); - int rlength = r.getRowLength(); - return compareRows(left, loffset, llength, right, roffset, rlength); - } - } - - /** - * Compare KeyValues. When we compare KeyValues, we only compare the Key - * portion. This means two KeyValues with same Key but different Values are - * considered the same as far as this Comparator is concerned. - */ - public static class KVComparator implements RawComparator, SamePrefixComparator { - - /** - * The HFileV2 file format's trailer contains this class name. We reinterpret this and - * instantiate the appropriate comparator. - * TODO: With V3 consider removing this. - * @return legacy class name for FileFileTrailer#comparatorClassName - */ - public String getLegacyKeyComparatorName() { - return "org.apache.hadoop.hbase.KeyValue$KeyComparator"; - } - - @Override // RawComparator - public int compare(byte[] l, int loff, int llen, byte[] r, int roff, int rlen) { - return compareFlatKey(l,loff,llen, r,roff,rlen); - } - - - /** - * Compares the only the user specified portion of a Key. This is overridden by MetaComparator. 
- * @param left - * @param right - * @return 0 if equal, <0 if left smaller, >0 if right smaller - */ - protected int compareRowKey(final Cell left, final Cell right) { - return CellComparator.compareRows(left, right); - } - - /** - * Compares left to right assuming that left,loffset,llength and right,roffset,rlength are - * full KVs laid out in a flat byte[]s. - * @param left - * @param loffset - * @param llength - * @param right - * @param roffset - * @param rlength - * @return 0 if equal, <0 if left smaller, >0 if right smaller - */ - public int compareFlatKey(byte[] left, int loffset, int llength, - byte[] right, int roffset, int rlength) { - // Compare row - short lrowlength = Bytes.toShort(left, loffset); - short rrowlength = Bytes.toShort(right, roffset); - int compare = compareRows(left, loffset + Bytes.SIZEOF_SHORT, - lrowlength, right, roffset + Bytes.SIZEOF_SHORT, rrowlength); - if (compare != 0) { - return compare; - } - - // Compare the rest of the two KVs without making any assumptions about - // the common prefix. This function will not compare rows anyway, so we - // don't need to tell it that the common prefix includes the row. - return compareWithoutRow(0, left, loffset, llength, right, roffset, - rlength, rrowlength); - } - - public int compareFlatKey(byte[] left, byte[] right) { - return compareFlatKey(left, 0, left.length, right, 0, right.length); - } - - // compare a key against row/fam/qual/ts/type - public int compareKey(Cell cell, - byte[] row, int roff, int rlen, - byte[] fam, int foff, int flen, - byte[] col, int coff, int clen, - long ts, byte type) { - - int compare = compareRows( - cell.getRowArray(), cell.getRowOffset(), cell.getRowLength(), - row, roff, rlen); - if (compare != 0) { - return compare; - } - // If the column is not specified, the "minimum" key type appears the - // latest in the sorted order, regardless of the timestamp. 
This is used - // for specifying the last key/value in a given row, because there is no - // "lexicographically last column" (it would be infinitely long). The - // "maximum" key type does not need this behavior. - if (cell.getFamilyLength() + cell.getQualifierLength() == 0 - && cell.getTypeByte() == Type.Minimum.getCode()) { - // left is "bigger", i.e. it appears later in the sorted order - return 1; - } - if (flen+clen == 0 && type == Type.Minimum.getCode()) { - return -1; - } - - compare = compareFamilies( - cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength(), - fam, foff, flen); - if (compare != 0) { - return compare; - } - compare = compareColumns( - cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength(), - col, coff, clen); - if (compare != 0) { - return compare; - } - // Next compare timestamps. - compare = compareTimestamps(cell.getTimestamp(), ts); - if (compare != 0) { - return compare; - } - - // Compare types. Let the delete types sort ahead of puts; i.e. types - // of higher numbers sort before those of lesser numbers. Maximum (255) - // appears ahead of everything, and minimum (0) appears after - // everything. - return (0xff & type) - (0xff & cell.getTypeByte()); - } - - public int compareOnlyKeyPortion(Cell left, Cell right) { - return CellComparator.compare(left, right, true); - } - - /** - * Compares the Key of a cell -- with fields being more significant in this order: - * rowkey, colfam/qual, timestamp, type, mvcc - */ - @Override - public int compare(final Cell left, final Cell right) { - int compare = CellComparator.compare(left, right, false); - return compare; - } - - public int compareTimestamps(final Cell left, final Cell right) { - return CellComparator.compareTimestamps(left, right); - } - - /** - * @param left - * @param right - * @return Result comparing rows. 
- */ - public int compareRows(final Cell left, final Cell right) { - return compareRows(left.getRowArray(),left.getRowOffset(), left.getRowLength(), - right.getRowArray(), right.getRowOffset(), right.getRowLength()); - } - - /** - * Get the b[],o,l for left and right rowkey portions and compare. - * @param left - * @param loffset - * @param llength - * @param right - * @param roffset - * @param rlength - * @return 0 if equal, <0 if left smaller, >0 if right smaller - */ - public int compareRows(byte [] left, int loffset, int llength, - byte [] right, int roffset, int rlength) { - return Bytes.compareTo(left, loffset, llength, right, roffset, rlength); - } - - int compareColumns(final Cell left, final short lrowlength, final Cell right, - final short rrowlength) { - return CellComparator.compareColumns(left, right); - } - - protected int compareColumns( - byte [] left, int loffset, int llength, final int lfamilylength, - byte [] right, int roffset, int rlength, final int rfamilylength) { - // Compare family portion first. - int diff = Bytes.compareTo(left, loffset, lfamilylength, - right, roffset, rfamilylength); - if (diff != 0) { - return diff; - } - // Compare qualifier portion - return Bytes.compareTo(left, loffset + lfamilylength, - llength - lfamilylength, - right, roffset + rfamilylength, rlength - rfamilylength); - } - - static int compareTimestamps(final long ltimestamp, final long rtimestamp) { - // The below older timestamps sorting ahead of newer timestamps looks - // wrong but it is intentional. This way, newer timestamps are first - // found when we iterate over a memstore and newer versions are the - // first we trip over when reading from a store file. 
- if (ltimestamp < rtimestamp) { - return 1; - } else if (ltimestamp > rtimestamp) { - return -1; - } - return 0; - } - - /** - * Overridden - * @param commonPrefix - * @param left - * @param loffset - * @param llength - * @param right - * @param roffset - * @param rlength - * @return 0 if equal, <0 if left smaller, >0 if right smaller - */ - @Override // SamePrefixComparator - public int compareIgnoringPrefix(int commonPrefix, byte[] left, - int loffset, int llength, byte[] right, int roffset, int rlength) { - // Compare row - short lrowlength = Bytes.toShort(left, loffset); - short rrowlength; - - int comparisonResult = 0; - if (commonPrefix < ROW_LENGTH_SIZE) { - // almost nothing in common - rrowlength = Bytes.toShort(right, roffset); - comparisonResult = compareRows(left, loffset + ROW_LENGTH_SIZE, - lrowlength, right, roffset + ROW_LENGTH_SIZE, rrowlength); - } else { // the row length is the same - rrowlength = lrowlength; - if (commonPrefix < ROW_LENGTH_SIZE + rrowlength) { - // The rows are not the same. Exclude the common prefix and compare - // the rest of the two rows. - int common = commonPrefix - ROW_LENGTH_SIZE; - comparisonResult = compareRows( - left, loffset + common + ROW_LENGTH_SIZE, lrowlength - common, - right, roffset + common + ROW_LENGTH_SIZE, rrowlength - common); - } - } - if (comparisonResult != 0) { - return comparisonResult; - } - - assert lrowlength == rrowlength; - return compareWithoutRow(commonPrefix, left, loffset, llength, right, - roffset, rlength, lrowlength); - } - - /** - * Compare columnFamily, qualifier, timestamp, and key type (everything - * except the row). This method is used both in the normal comparator and - * the "same-prefix" comparator. Note that we are assuming that row portions - * of both KVs have already been parsed and found identical, and we don't - * validate that assumption here. 
- * @param commonPrefix - * the length of the common prefix of the two key-values being - * compared, including row length and row - */ - private int compareWithoutRow(int commonPrefix, byte[] left, int loffset, - int llength, byte[] right, int roffset, int rlength, short rowlength) { - /*** - * KeyValue Format and commonLength: - * |_keyLen_|_valLen_|_rowLen_|_rowKey_|_famiLen_|_fami_|_Quali_|.... - * ------------------|-------commonLength--------|-------------- - */ - int commonLength = ROW_LENGTH_SIZE + FAMILY_LENGTH_SIZE + rowlength; - - // commonLength + TIMESTAMP_TYPE_SIZE - int commonLengthWithTSAndType = TIMESTAMP_TYPE_SIZE + commonLength; - // ColumnFamily + Qualifier length. - int lcolumnlength = llength - commonLengthWithTSAndType; - int rcolumnlength = rlength - commonLengthWithTSAndType; - - byte ltype = left[loffset + (llength - 1)]; - byte rtype = right[roffset + (rlength - 1)]; - - // If the column is not specified, the "minimum" key type appears the - // latest in the sorted order, regardless of the timestamp. This is used - // for specifying the last key/value in a given row, because there is no - // "lexicographically last column" (it would be infinitely long). The - // "maximum" key type does not need this behavior. - if (lcolumnlength == 0 && ltype == Type.Minimum.getCode()) { - // left is "bigger", i.e. it appears later in the sorted order - return 1; - } - if (rcolumnlength == 0 && rtype == Type.Minimum.getCode()) { - return -1; - } - - int lfamilyoffset = commonLength + loffset; - int rfamilyoffset = commonLength + roffset; - - // Column family length. - int lfamilylength = left[lfamilyoffset - 1]; - int rfamilylength = right[rfamilyoffset - 1]; - // If left family size is not equal to right family size, we need not - // compare the qualifiers. 
- boolean sameFamilySize = (lfamilylength == rfamilylength); - int common = 0; - if (commonPrefix > 0) { - common = Math.max(0, commonPrefix - commonLength); - if (!sameFamilySize) { - // Common should not be larger than Math.min(lfamilylength, - // rfamilylength). - common = Math.min(common, Math.min(lfamilylength, rfamilylength)); - } else { - common = Math.min(common, Math.min(lcolumnlength, rcolumnlength)); - } - } - if (!sameFamilySize) { - // comparing column family is enough. - return Bytes.compareTo(left, lfamilyoffset + common, lfamilylength - - common, right, rfamilyoffset + common, rfamilylength - common); - } - // Compare family & qualifier together. - final int comparison = Bytes.compareTo(left, lfamilyoffset + common, - lcolumnlength - common, right, rfamilyoffset + common, - rcolumnlength - common); - if (comparison != 0) { - return comparison; - } - - //// - // Next compare timestamps. - long ltimestamp = Bytes.toLong(left, - loffset + (llength - TIMESTAMP_TYPE_SIZE)); - long rtimestamp = Bytes.toLong(right, - roffset + (rlength - TIMESTAMP_TYPE_SIZE)); - int compare = compareTimestamps(ltimestamp, rtimestamp); - if (compare != 0) { - return compare; - } - - // Compare types. Let the delete types sort ahead of puts; i.e. types - // of higher numbers sort before those of lesser numbers. Maximum (255) - // appears ahead of everything, and minimum (0) appears after - // everything. 
- return (0xff & rtype) - (0xff & ltype); - } - - protected int compareFamilies(final byte[] left, final int loffset, final int lfamilylength, - final byte[] right, final int roffset, final int rfamilylength) { - int diff = Bytes.compareTo(left, loffset, lfamilylength, right, roffset, rfamilylength); - return diff; - } - - protected int compareColumns(final byte[] left, final int loffset, final int lquallength, - final byte[] right, final int roffset, final int rquallength) { - int diff = Bytes.compareTo(left, loffset, lquallength, right, roffset, rquallength); - return diff; - } - /** - * Compares the row and column of two keyvalues for equality - * @param left - * @param right - * @return True if same row and column. - */ - public boolean matchingRowColumn(final Cell left, - final Cell right) { - short lrowlength = left.getRowLength(); - short rrowlength = right.getRowLength(); - - // TsOffset = end of column data. just comparing Row+CF length of each - if ((left.getRowLength() + left.getFamilyLength() + left.getQualifierLength()) != (right - .getRowLength() + right.getFamilyLength() + right.getQualifierLength())) { - return false; - } - - if (!matchingRows(left, lrowlength, right, rrowlength)) { - return false; - } - - int lfoffset = left.getFamilyOffset(); - int rfoffset = right.getFamilyOffset(); - int lclength = left.getQualifierLength(); - int rclength = right.getQualifierLength(); - int lfamilylength = left.getFamilyLength(); - int rfamilylength = right.getFamilyLength(); - int diff = compareFamilies(left.getFamilyArray(), lfoffset, lfamilylength, - right.getFamilyArray(), rfoffset, rfamilylength); - if (diff != 0) { - return false; - } else { - diff = compareColumns(left.getQualifierArray(), left.getQualifierOffset(), lclength, - right.getQualifierArray(), right.getQualifierOffset(), rclength); - return diff == 0; - } - } - - /** - * Compares the row of two keyvalues for equality - * @param left - * @param right - * @return True if rows match. 
- */ - public boolean matchingRows(final Cell left, final Cell right) { - short lrowlength = left.getRowLength(); - short rrowlength = right.getRowLength(); - return matchingRows(left, lrowlength, right, rrowlength); - } - - /** - * @param left - * @param lrowlength - * @param right - * @param rrowlength - * @return True if rows match. - */ - private boolean matchingRows(final Cell left, final short lrowlength, - final Cell right, final short rrowlength) { - return lrowlength == rrowlength && - matchingRows(left.getRowArray(), left.getRowOffset(), lrowlength, - right.getRowArray(), right.getRowOffset(), rrowlength); - } - - /** - * Compare rows. Just calls Bytes.equals, but it's good to have this encapsulated. - * @param left Left row array. - * @param loffset Left row offset. - * @param llength Left row length. - * @param right Right row array. - * @param roffset Right row offset. - * @param rlength Right row length. - * @return Whether rows are the same row. - */ - public boolean matchingRows(final byte [] left, final int loffset, final int llength, - final byte [] right, final int roffset, final int rlength) { - return Bytes.equals(left, loffset, llength, right, roffset, rlength); - } - - public byte[] calcIndexKey(byte[] lastKeyOfPreviousBlock, byte[] firstKeyInBlock) { - byte[] fakeKey = getShortMidpointKey(lastKeyOfPreviousBlock, firstKeyInBlock); - if (compareFlatKey(fakeKey, firstKeyInBlock) > 0) { - LOG.error("Unexpected getShortMidpointKey result, fakeKey:" - + Bytes.toStringBinary(fakeKey) + ", firstKeyInBlock:" - + Bytes.toStringBinary(firstKeyInBlock)); - return firstKeyInBlock; - } - if (lastKeyOfPreviousBlock != null && compareFlatKey(lastKeyOfPreviousBlock, fakeKey) >= 0) { - LOG.error("Unexpected getShortMidpointKey result, lastKeyOfPreviousBlock:" + - Bytes.toStringBinary(lastKeyOfPreviousBlock) + ", fakeKey:" + - Bytes.toStringBinary(fakeKey)); - return firstKeyInBlock; - } - return fakeKey; - } - - /** - * This is a HFile block index key 
optimization. - * @param leftKey - * @param rightKey - * @return 0 if equal, <0 if left smaller, >0 if right smaller - * @deprecated Since 0.99.2; Use - * {@link CellComparator#getMidpoint(KeyValue.KVComparator, Cell, Cell) instead} - */ - @Deprecated - public byte[] getShortMidpointKey(final byte[] leftKey, final byte[] rightKey) { - if (rightKey == null) { - throw new IllegalArgumentException("rightKey can not be null"); - } - if (leftKey == null) { - return Arrays.copyOf(rightKey, rightKey.length); - } - if (compareFlatKey(leftKey, rightKey) >= 0) { - throw new IllegalArgumentException("Unexpected input, leftKey:" + Bytes.toString(leftKey) - + ", rightKey:" + Bytes.toString(rightKey)); - } - - short leftRowLength = Bytes.toShort(leftKey, 0); - short rightRowLength = Bytes.toShort(rightKey, 0); - int leftCommonLength = ROW_LENGTH_SIZE + FAMILY_LENGTH_SIZE + leftRowLength; - int rightCommonLength = ROW_LENGTH_SIZE + FAMILY_LENGTH_SIZE + rightRowLength; - int leftCommonLengthWithTSAndType = TIMESTAMP_TYPE_SIZE + leftCommonLength; - int rightCommonLengthWithTSAndType = TIMESTAMP_TYPE_SIZE + rightCommonLength; - int leftColumnLength = leftKey.length - leftCommonLengthWithTSAndType; - int rightColumnLength = rightKey.length - rightCommonLengthWithTSAndType; - // rows are equal - if (leftRowLength == rightRowLength && compareRows(leftKey, ROW_LENGTH_SIZE, leftRowLength, - rightKey, ROW_LENGTH_SIZE, rightRowLength) == 0) { - // Compare family & qualifier together. 
- int comparison = Bytes.compareTo(leftKey, leftCommonLength, leftColumnLength, rightKey, - rightCommonLength, rightColumnLength); - // same with "row + family + qualifier", return rightKey directly - if (comparison == 0) { - return Arrays.copyOf(rightKey, rightKey.length); - } - // "family + qualifier" are different, generate a faked key per rightKey - byte[] newKey = Arrays.copyOf(rightKey, rightKey.length); - Bytes.putLong(newKey, rightKey.length - TIMESTAMP_TYPE_SIZE, HConstants.LATEST_TIMESTAMP); - Bytes.putByte(newKey, rightKey.length - TYPE_SIZE, Type.Maximum.getCode()); - return newKey; - } - // rows are different - short minLength = leftRowLength < rightRowLength ? leftRowLength : rightRowLength; - short diffIdx = 0; - while (diffIdx < minLength - && leftKey[ROW_LENGTH_SIZE + diffIdx] == rightKey[ROW_LENGTH_SIZE + diffIdx]) { - diffIdx++; - } - byte[] newRowKey = null; - if (diffIdx >= minLength) { - // leftKey's row is prefix of rightKey's. - newRowKey = new byte[diffIdx + 1]; - System.arraycopy(rightKey, ROW_LENGTH_SIZE, newRowKey, 0, diffIdx + 1); - } else { - int diffByte = leftKey[ROW_LENGTH_SIZE + diffIdx]; - if ((0xff & diffByte) < 0xff && (diffByte + 1) < - (rightKey[ROW_LENGTH_SIZE + diffIdx] & 0xff)) { - newRowKey = new byte[diffIdx + 1]; - System.arraycopy(leftKey, ROW_LENGTH_SIZE, newRowKey, 0, diffIdx); - newRowKey[diffIdx] = (byte) (diffByte + 1); - } else { - newRowKey = new byte[diffIdx + 1]; - System.arraycopy(rightKey, ROW_LENGTH_SIZE, newRowKey, 0, diffIdx + 1); - } - } - return new KeyValue(newRowKey, null, null, HConstants.LATEST_TIMESTAMP, - Type.Maximum).getKey(); - } - - @Override - protected Object clone() throws CloneNotSupportedException { - super.clone(); - return new KVComparator(); - } - - } - - /** - * @param b - * @return A KeyValue made of a byte array that holds the key-only part. - * Needed to convert hfile index members to KeyValues. 
- */ - public static KeyValue createKeyValueFromKey(final byte [] b) { - return createKeyValueFromKey(b, 0, b.length); - } - - /** - * @param bb - * @return A KeyValue made of a byte buffer that holds the key-only part. - * Needed to convert hfile index members to KeyValues. - */ - public static KeyValue createKeyValueFromKey(final ByteBuffer bb) { - return createKeyValueFromKey(bb.array(), bb.arrayOffset(), bb.limit()); - } - - /** - * @param b - * @param o - * @param l - * @return A KeyValue made of a byte array that holds the key-only part. - * Needed to convert hfile index members to KeyValues. - */ - public static KeyValue createKeyValueFromKey(final byte [] b, final int o, - final int l) { - byte [] newb = new byte[l + ROW_OFFSET]; - System.arraycopy(b, o, newb, ROW_OFFSET, l); - Bytes.putInt(newb, 0, l); - Bytes.putInt(newb, Bytes.SIZEOF_INT, 0); - return new KeyValue(newb); - } - - /** - * @param in Where to read bytes from. Creates a byte array to hold the KeyValue - * backing bytes copied from the steam. - * @return KeyValue created by deserializing from in OR if we find a length - * of zero, we will return null which can be useful marking a stream as done. - * @throws IOException - */ - public static KeyValue create(final DataInput in) throws IOException { - return create(in.readInt(), in); - } - - /** - * Create a KeyValue reading length from in - * @param length - * @param in - * @return Created KeyValue OR if we find a length of zero, we will return null which - * can be useful marking a stream as done. - * @throws IOException - */ - public static KeyValue create(int length, final DataInput in) throws IOException { - - if (length <= 0) { - if (length == 0) return null; - throw new IOException("Failed read " + length + " bytes, stream corrupt?"); - } - - // This is how the old Writables.readFrom used to deserialize. Didn't even vint. 
- byte [] bytes = new byte[length]; - in.readFully(bytes); - return new KeyValue(bytes, 0, length); - } - - /** - * Create a new KeyValue by copying existing cell and adding new tags - * @param c - * @param newTags - * @return a new KeyValue instance with new tags - */ - public static KeyValue cloneAndAddTags(Cell c, List newTags) { - List existingTags = null; - if(c.getTagsLength() > 0) { - existingTags = Tag.asList(c.getTagsArray(), c.getTagsOffset(), c.getTagsLength()); - existingTags.addAll(newTags); - } else { - existingTags = newTags; - } - return new KeyValue(c.getRowArray(), c.getRowOffset(), (int)c.getRowLength(), - c.getFamilyArray(), c.getFamilyOffset(), (int)c.getFamilyLength(), - c.getQualifierArray(), c.getQualifierOffset(), (int) c.getQualifierLength(), - c.getTimestamp(), Type.codeToType(c.getTypeByte()), c.getValueArray(), c.getValueOffset(), - c.getValueLength(), existingTags); - } - - /** - * Create a KeyValue reading from the raw InputStream. - * Named iscreate so doesn't clash with {@link #create(DataInput)} - * @param in - * @return Created KeyValue OR if we find a length of zero, we will return null which - * can be useful marking a stream as done. - * @throws IOException - */ - public static KeyValue iscreate(final InputStream in) throws IOException { - byte [] intBytes = new byte[Bytes.SIZEOF_INT]; - int bytesRead = 0; - while (bytesRead < intBytes.length) { - int n = in.read(intBytes, bytesRead, intBytes.length - bytesRead); - if (n < 0) { - if (bytesRead == 0) return null; // EOF at start is ok - throw new IOException("Failed read of int, read " + bytesRead + " bytes"); - } - bytesRead += n; - } - // TODO: perhaps some sanity check is needed here. - byte [] bytes = new byte[Bytes.toInt(intBytes)]; - IOUtils.readFully(in, bytes, 0, bytes.length); - return new KeyValue(bytes, 0, bytes.length); - } - - /** - * Write out a KeyValue in the manner in which we used to when KeyValue was a Writable. 
- * @param kv - * @param out - * @return Length written on stream - * @throws IOException - * @see #create(DataInput) for the inverse function - */ - public static long write(final KeyValue kv, final DataOutput out) throws IOException { - // This is how the old Writables write used to serialize KVs. Need to figure way to make it - // work for all implementations. - int length = kv.getLength(); - out.writeInt(length); - out.write(kv.getBuffer(), kv.getOffset(), length); - return length + Bytes.SIZEOF_INT; - } - - /** - * Write out a KeyValue in the manner in which we used to when KeyValue was a Writable but do - * not require a {@link DataOutput}, just take plain {@link OutputStream} - * Named oswrite so does not clash with {@link #write(KeyValue, DataOutput)} - * @param kv - * @param out - * @return Length written on stream - * @throws IOException - * @see #create(DataInput) for the inverse function - * @see #write(KeyValue, DataOutput) - * @deprecated use {@link #oswrite(KeyValue, OutputStream, boolean)} instead - */ - @Deprecated - public static long oswrite(final KeyValue kv, final OutputStream out) - throws IOException { - int length = kv.getLength(); - // This does same as DataOuput#writeInt (big-endian, etc.) 
- out.write(Bytes.toBytes(length)); - out.write(kv.getBuffer(), kv.getOffset(), length); - return length + Bytes.SIZEOF_INT; - } - - /** - * Write out a KeyValue in the manner in which we used to when KeyValue was a Writable but do - * not require a {@link DataOutput}, just take plain {@link OutputStream} - * Named oswrite so does not clash with {@link #write(KeyValue, DataOutput)} - * @param kv - * @param out - * @param withTags - * @return Length written on stream - * @throws IOException - * @see #create(DataInput) for the inverse function - * @see #write(KeyValue, DataOutput) - * @see KeyValueUtil#oswrite(Cell, OutputStream, boolean) - */ - public static long oswrite(final KeyValue kv, final OutputStream out, final boolean withTags) - throws IOException { - // In KeyValueUtil#oswrite we do a Cell serialization as KeyValue. Any changes doing here, pls - // check KeyValueUtil#oswrite also and do necessary changes. - int length = kv.getLength(); - if (!withTags) { - length = kv.getKeyLength() + kv.getValueLength() + KEYVALUE_INFRASTRUCTURE_SIZE; - } - // This does same as DataOuput#writeInt (big-endian, etc.) - StreamUtils.writeInt(out, length); - out.write(kv.getBuffer(), kv.getOffset(), length); - return length + Bytes.SIZEOF_INT; - } - - /** * Comparator that compares row component only of a KeyValue. */ public static class RowOnlyComparator implements Comparator { - final KVComparator comparator; + final CellComparator comparator; - public RowOnlyComparator(final KVComparator c) { + public RowOnlyComparator(final CellComparator c) { this.comparator = c; } @@ -2538,27 +1680,11 @@ public class KeyValue implements Cell, HeapSize, Cloneable, SettableSequenceId, return comparator.compareRows(left, right); } } - - - /** - * Avoids redundant comparisons for better performance. - * - * TODO get rid of this wart - */ - public interface SamePrefixComparator { - /** - * Compare two keys assuming that the first n bytes are the same. 
- * @param commonPrefix How many bytes are the same. - */ - int compareIgnoringPrefix(int commonPrefix, byte[] left, int loffset, int llength, - byte[] right, int roffset, int rlength - ); - } - + /** * This is a TEST only Comparator used in TestSeekTo and TestReseekTo. */ - public static class RawBytesComparator extends KVComparator { + public static class RawBytesComparator extends CellComparator { /** * The HFileV2 file format's trailer contains this class name. We reinterpret this and * instantiate the appropriate comparator. @@ -2620,6 +1746,36 @@ public class KeyValue implements Cell, HeapSize, Cloneable, SettableSequenceId, } /** + * Write out a KeyValue in the manner in which we used to when KeyValue was a + * Writable but do not require a {@link DataOutput}, just take plain + * {@link OutputStream} Named oswrite so does not clash with + * {@link #write(KeyValue, DataOutput)} + * + * @param kv + * @param out + * @param withTags + * @return Length written on stream + * @throws IOException + * @see KeyValueUtil#create(DataInput) for the inverse function + * @see KeyValueUtil#write(KeyValue, DataOutput) + * @see KeyValueUtil#oswrite(Cell, OutputStream, boolean) + */ + public static long oswrite(final KeyValue kv, final OutputStream out, final boolean withTags) + throws IOException { + // In KeyValueUtil#oswrite we do a Cell serialization as KeyValue. Any + // changes doing here, pls + // check KeyValueUtil#oswrite also and do necessary changes. + int length = kv.getLength(); + if (!withTags) { + length = kv.getKeyLength() + kv.getValueLength() + KEYVALUE_INFRASTRUCTURE_SIZE; + } + // This does same as DataOuput#writeInt (big-endian, etc.) 
+ StreamUtils.writeInt(out, length); + out.write(kv.getBuffer(), kv.getOffset(), length); + return length + Bytes.SIZEOF_INT; + } + + /** * HeapSize implementation * * We do not count the bytes in the rowCache because it should be empty for a KeyValue in the diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueTestUtil.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueTestUtil.java index f0c8b48..50a409d 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueTestUtil.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueTestUtil.java @@ -85,7 +85,7 @@ public class KeyValueTestUtil { for (Cell kv1 : kvCollection1) { boolean found = false; for (Cell kv2 : kvCollection2) { - if (CellComparator.equalsIgnoreMvccVersion(kv1, kv2)) found = true; + if (CellUtil.equalsIgnoreMvccVersion(kv1, kv2)) found = true; } if (!found) return false; } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueUtil.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueUtil.java index dde15bc..f37ab24 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueUtil.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueUtil.java @@ -18,7 +18,10 @@ package org.apache.hadoop.hbase; +import java.io.DataInput; +import java.io.DataOutput; import java.io.IOException; +import java.io.InputStream; import java.io.OutputStream; import java.nio.ByteBuffer; import java.util.ArrayList; @@ -31,8 +34,10 @@ import org.apache.hadoop.hbase.util.ByteBufferUtils; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.IterableUtils; import org.apache.hadoop.hbase.util.SimpleMutableByteRange; +import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.io.WritableUtils; +import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Function; import com.google.common.collect.Lists; @@ -541,6 +546,168 @@ public class KeyValueUtil { }); return 
new ArrayList(lazyList); } + /** + * Write out a KeyValue in the manner in which we used to when KeyValue was a + * Writable. + * + * @param kv + * @param out + * @return Length written on stream + * @throws IOException + * @see #create(DataInput) for the inverse function + */ + public static long write(final KeyValue kv, final DataOutput out) throws IOException { + // This is how the old Writables write used to serialize KVs. Need to figure + // way to make it + // work for all implementations. + int length = kv.getLength(); + out.writeInt(length); + out.write(kv.getBuffer(), kv.getOffset(), length); + return length + Bytes.SIZEOF_INT; + } + + /** + * Create a KeyValue reading from the raw InputStream. Named + * iscreate so doesn't clash with {@link #create(DataInput)} + * + * @param in + * @return Created KeyValue OR if we find a length of zero, we will return + * null which can be useful marking a stream as done. + * @throws IOException + */ + public static KeyValue iscreate(final InputStream in) throws IOException { + byte[] intBytes = new byte[Bytes.SIZEOF_INT]; + int bytesRead = 0; + while (bytesRead < intBytes.length) { + int n = in.read(intBytes, bytesRead, intBytes.length - bytesRead); + if (n < 0) { + if (bytesRead == 0) + return null; // EOF at start is ok + throw new IOException("Failed read of int, read " + bytesRead + " bytes"); + } + bytesRead += n; + } + // TODO: perhaps some sanity check is needed here. 
+ byte[] bytes = new byte[Bytes.toInt(intBytes)]; + IOUtils.readFully(in, bytes, 0, bytes.length); + return new KeyValue(bytes, 0, bytes.length); + } + + /** + * @param commonPrefix + * @param left + * @param loffset + * @param llength + * @param right + * @param roffset + * @param rlength + * @return 0 if equal, <0 if left smaller, >0 if right smaller + */ + @VisibleForTesting + // SamePrefixComparator + // Used only in testcases + public static int compareIgnoringPrefix(int commonPrefix, byte[] left, + int loffset, int llength, byte[] right, int roffset, int rlength) { + // Compare row + short lrowlength = Bytes.toShort(left, loffset); + short rrowlength; + + int comparisonResult = 0; + if (commonPrefix < KeyValue.ROW_LENGTH_SIZE) { + // almost nothing in common + rrowlength = Bytes.toShort(right, roffset); + comparisonResult = KeyValue.COMPARATOR.compareRows(left, loffset + + KeyValue.ROW_LENGTH_SIZE, lrowlength, right, roffset + KeyValue.ROW_LENGTH_SIZE, + rrowlength); + } else { // the row length is the same + rrowlength = lrowlength; + if (commonPrefix < KeyValue.ROW_LENGTH_SIZE + rrowlength) { + // The rows are not the same. Exclude the common prefix and compare + // the rest of the two rows. + int common = commonPrefix - KeyValue.ROW_LENGTH_SIZE; + comparisonResult = KeyValue.COMPARATOR.compareRows(left, loffset + common + + KeyValue.ROW_LENGTH_SIZE, lrowlength - common, right, roffset + common + + KeyValue.ROW_LENGTH_SIZE, rrowlength - common); + } + } + if (comparisonResult != 0) { + return comparisonResult; + } + + assert lrowlength == rrowlength; + return KeyValue.COMPARATOR.compareWithoutRow(commonPrefix, left, loffset, llength, right, + roffset, rlength, lrowlength); + } + + /** + * @param b + * @return A KeyValue made of a byte array that holds the key-only part. + * Needed to convert hfile index members to KeyValues. 
+ */ + public static KeyValue createKeyValueFromKey(final byte[] b) { + return createKeyValueFromKey(b, 0, b.length); + } + + /** + * @param bb + * @return A KeyValue made of a byte buffer that holds the key-only part. + * Needed to convert hfile index members to KeyValues. + */ + public static KeyValue createKeyValueFromKey(final ByteBuffer bb) { + return createKeyValueFromKey(bb.array(), bb.arrayOffset(), bb.limit()); + } + + /** + * @param b + * @param o + * @param l + * @return A KeyValue made of a byte array that holds the key-only part. + * Needed to convert hfile index members to KeyValues. + */ + public static KeyValue createKeyValueFromKey(final byte[] b, final int o, final int l) { + byte[] newb = new byte[l + KeyValue.ROW_OFFSET]; + System.arraycopy(b, o, newb, KeyValue.ROW_OFFSET, l); + Bytes.putInt(newb, 0, l); + Bytes.putInt(newb, Bytes.SIZEOF_INT, 0); + return new KeyValue(newb); + } + + /** + * @param in + * Where to read bytes from. Creates a byte array to hold the + * KeyValue backing bytes copied from the steam. + * @return KeyValue created by deserializing from in OR if we + * find a length of zero, we will return null which can be useful + * marking a stream as done. + * @throws IOException + */ + public static KeyValue create(final DataInput in) throws IOException { + return create(in.readInt(), in); + } + + /** + * Create a KeyValue reading length from in + * + * @param length + * @param in + * @return Created KeyValue OR if we find a length of zero, we will return + * null which can be useful marking a stream as done. + * @throws IOException + */ + public static KeyValue create(int length, final DataInput in) throws IOException { + + if (length <= 0) { + if (length == 0) + return null; + throw new IOException("Failed read " + length + " bytes, stream corrupt?"); + } + + // This is how the old Writables.readFrom used to deserialize. Didn't even + // vint. 
+ byte[] bytes = new byte[length]; + in.readFully(bytes); + return new KeyValue(bytes, 0, length); + } public static void oswrite(final Cell cell, final OutputStream out, final boolean withTags) throws IOException { @@ -576,7 +743,8 @@ public class KeyValueUtil { // write tags if we have to if (withTags) { // 2 bytes tags length followed by tags bytes - // tags length is serialized with 2 bytes only(short way) even if the type is int. As this + // tags length is serialized with 2 bytes only(short way) even if the + // type is int. As this // is non -ve numbers, we save the sign bit. See HBASE-11437 out.write((byte) (0xff & (tlen >> 8))); out.write((byte) (0xff & tlen)); diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/TableName.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/TableName.java index 17fd3b7..713918f 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/TableName.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/TableName.java @@ -23,7 +23,6 @@ import java.util.Arrays; import java.util.Set; import java.util.concurrent.CopyOnWriteArraySet; -import org.apache.hadoop.hbase.KeyValue.KVComparator; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.hbase.util.Bytes; @@ -514,7 +513,7 @@ public final class TableName implements Comparable { * * @return The comparator. 
*/ - public KVComparator getRowComparator() { + public CellComparator getRowComparator() { if(TableName.META_TABLE_NAME.equals(this)) { return KeyValue.META_COMPARATOR; } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/KeyValueCodec.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/KeyValueCodec.java index f41d6b0..bdcb945 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/KeyValueCodec.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/KeyValueCodec.java @@ -65,7 +65,7 @@ public class KeyValueCodec implements Codec { } protected Cell parseCell() throws IOException { - return KeyValue.iscreate(in); + return KeyValueUtil.iscreate(in); } } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/KeyValueCodecWithTags.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/KeyValueCodecWithTags.java index 664fcac..c6831ef 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/KeyValueCodecWithTags.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/KeyValueCodecWithTags.java @@ -71,7 +71,7 @@ public class KeyValueCodecWithTags implements Codec { } protected Cell parseCell() throws IOException { - return KeyValue.iscreate(in); + return KeyValueUtil.iscreate(in); } } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/BufferedDataBlockEncoder.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/BufferedDataBlockEncoder.java index be8c192..d9ab044 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/BufferedDataBlockEncoder.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/BufferedDataBlockEncoder.java @@ -26,8 +26,6 @@ import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.KeyValue.KVComparator; -import 
org.apache.hadoop.hbase.KeyValue.SamePrefixComparator; import org.apache.hadoop.hbase.KeyValue.Type; import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.SettableSequenceId; @@ -520,14 +518,15 @@ abstract class BufferedDataBlockEncoder implements DataBlockEncoder { BufferedEncodedSeeker implements EncodedSeeker { protected HFileBlockDecodingContext decodingCtx; - protected final KVComparator comparator; - protected final SamePrefixComparator samePrefixComparator; + protected final CellComparator comparator; + // remove this? + protected final CellComparator samePrefixComparator; protected ByteBuffer currentBuffer; protected STATE current = createSeekerState(); // always valid protected STATE previous = createSeekerState(); // may not be valid protected TagCompressionContext tagCompressionContext = null; - public BufferedEncodedSeeker(KVComparator comparator, + public BufferedEncodedSeeker(CellComparator comparator, HFileBlockDecodingContext decodingCtx) { this.comparator = comparator; this.samePrefixComparator = comparator; @@ -550,13 +549,12 @@ abstract class BufferedDataBlockEncoder implements DataBlockEncoder { } @Override - public int compareKey(KVComparator comparator, byte[] key, int offset, int length) { - return comparator.compareFlatKey(key, offset, length, - current.keyBuffer, 0, current.keyLength); + public int compareKey(CellComparator comparator, byte[] key, int offset, int length) { + return compareKey(comparator, new KeyValue.KeyOnlyKeyValue(key, offset, length)); } @Override - public int compareKey(KVComparator comparator, Cell key) { + public int compareKey(CellComparator comparator, Cell key) { return comparator.compareOnlyKeyPortion(key, new KeyValue.KeyOnlyKeyValue(current.keyBuffer, 0, current.keyLength)); } @@ -703,9 +701,10 @@ abstract class BufferedDataBlockEncoder implements DataBlockEncoder { if (current.lastCommonPrefix <= 2) { rowCommonPrefix = 0; } - rowCommonPrefix += 
CellComparator.findCommonPrefixInRowPart(seekCell, currentCell, + rowCommonPrefix += CellUtil.findCommonPrefixInRowPart(seekCell, currentCell, + rowCommonPrefix); + comp = samePrefixComparator.compareCommonRowPrefix(seekCell, currentCell, rowCommonPrefix); - comp = CellComparator.compareCommonRowPrefix(seekCell, currentCell, rowCommonPrefix); if (comp == 0) { comp = compareTypeBytes(seekCell, currentCell); if (comp == 0) { @@ -714,9 +713,9 @@ abstract class BufferedDataBlockEncoder implements DataBlockEncoder { 0, Math.min(familyCommonPrefix, current.lastCommonPrefix - (3 + currentCell.getRowLength()))); - familyCommonPrefix += CellComparator.findCommonPrefixInFamilyPart(seekCell, + familyCommonPrefix += CellUtil.findCommonPrefixInFamilyPart(seekCell, currentCell, familyCommonPrefix); - comp = CellComparator.compareCommonFamilyPrefix(seekCell, currentCell, + comp = samePrefixComparator.compareCommonFamilyPrefix(seekCell, currentCell, familyCommonPrefix); if (comp == 0) { // subtract the rowkey fixed length and the family key fixed @@ -727,12 +726,12 @@ abstract class BufferedDataBlockEncoder implements DataBlockEncoder { qualCommonPrefix, current.lastCommonPrefix - (3 + currentCell.getRowLength() + currentCell.getFamilyLength()))); - qualCommonPrefix += CellComparator.findCommonPrefixInQualifierPart(seekCell, + qualCommonPrefix += CellUtil.findCommonPrefixInQualifierPart(seekCell, currentCell, qualCommonPrefix); - comp = CellComparator.compareCommonQualifierPrefix(seekCell, currentCell, + comp = samePrefixComparator.compareCommonQualifierPrefix(seekCell, currentCell, qualCommonPrefix); if (comp == 0) { - comp = CellComparator.compareTimestamps(seekCell, currentCell); + comp = samePrefixComparator.compareTimestamps(seekCell, currentCell); if (comp == 0) { // Compare types. Let the delete types sort ahead of puts; // i.e. 
types diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/CopyKeyDataBlockEncoder.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/CopyKeyDataBlockEncoder.java index 6b87c77..6fa01c0 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/CopyKeyDataBlockEncoder.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/CopyKeyDataBlockEncoder.java @@ -22,9 +22,9 @@ import java.io.IOException; import java.nio.ByteBuffer; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.KeyValue.KVComparator; import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.util.ByteBufferUtils; @@ -81,7 +81,7 @@ public class CopyKeyDataBlockEncoder extends BufferedDataBlockEncoder { } @Override - public EncodedSeeker createSeeker(KVComparator comparator, + public EncodedSeeker createSeeker(CellComparator comparator, final HFileBlockDecodingContext decodingCtx) { return new BufferedEncodedSeeker(comparator, decodingCtx) { @Override diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DataBlockEncoder.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DataBlockEncoder.java index 872c22c..72b056d 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DataBlockEncoder.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DataBlockEncoder.java @@ -22,7 +22,7 @@ import java.io.IOException; import java.nio.ByteBuffer; import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.KeyValue.KVComparator; +import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.io.hfile.HFileContext; @@ -98,7 +98,7 @@ public interface 
DataBlockEncoder { * @param decodingCtx * @return A newly created seeker. */ - EncodedSeeker createSeeker(KVComparator comparator, + EncodedSeeker createSeeker(CellComparator comparator, HFileBlockDecodingContext decodingCtx); /** @@ -218,8 +218,8 @@ public interface DataBlockEncoder { * @param length * @return -1 is the passed key is smaller than the current key, 0 if equal and 1 if greater */ - public int compareKey(KVComparator comparator, byte[] key, int offset, int length); + public int compareKey(CellComparator comparator, byte[] key, int offset, int length); - public int compareKey(KVComparator comparator, Cell key); + public int compareKey(CellComparator comparator, Cell key); } } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DiffKeyDeltaEncoder.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DiffKeyDeltaEncoder.java index 4182dc4..c55400b 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DiffKeyDeltaEncoder.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DiffKeyDeltaEncoder.java @@ -22,9 +22,9 @@ import java.io.IOException; import java.nio.ByteBuffer; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.KeyValue.KVComparator; import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.util.ByteBufferUtils; @@ -372,7 +372,7 @@ public class DiffKeyDeltaEncoder extends BufferedDataBlockEncoder { } @Override - public EncodedSeeker createSeeker(KVComparator comparator, + public EncodedSeeker createSeeker(CellComparator comparator, HFileBlockDecodingContext decodingCtx) { return new BufferedEncodedSeeker(comparator, decodingCtx) { private byte[] familyNameWithSize; diff --git 
a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/FastDiffDeltaEncoder.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/FastDiffDeltaEncoder.java index a6f43d0..3000993 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/FastDiffDeltaEncoder.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/FastDiffDeltaEncoder.java @@ -22,9 +22,9 @@ import java.io.IOException; import java.nio.ByteBuffer; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.KeyValue.KVComparator; import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.util.ByteBufferUtils; @@ -392,7 +392,7 @@ public class FastDiffDeltaEncoder extends BufferedDataBlockEncoder { } @Override - public EncodedSeeker createSeeker(KVComparator comparator, + public EncodedSeeker createSeeker(CellComparator comparator, final HFileBlockDecodingContext decodingCtx) { return new BufferedEncodedSeeker(comparator, decodingCtx) { private void decode(boolean isFirst) { diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/PrefixKeyDeltaEncoder.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/PrefixKeyDeltaEncoder.java index 0286eca..1350366 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/PrefixKeyDeltaEncoder.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/PrefixKeyDeltaEncoder.java @@ -22,9 +22,9 @@ import java.io.IOException; import java.nio.ByteBuffer; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.KeyValue.KVComparator; import org.apache.hadoop.hbase.KeyValueUtil; import 
org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.util.ByteBufferUtils; @@ -196,7 +196,7 @@ public class PrefixKeyDeltaEncoder extends BufferedDataBlockEncoder { } @Override - public EncodedSeeker createSeeker(KVComparator comparator, + public EncodedSeeker createSeeker(CellComparator comparator, final HFileBlockDecodingContext decodingCtx) { return new BufferedEncodedSeeker(comparator, decodingCtx) { @Override diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestCellComparator.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestCellComparator.java index 007f826..b96c872 100644 --- a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestCellComparator.java +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestCellComparator.java @@ -20,7 +20,6 @@ package org.apache.hadoop.hbase; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; -import org.apache.hadoop.hbase.KeyValue.KVComparator; import org.apache.hadoop.hbase.KeyValue.Type; import org.apache.hadoop.hbase.testclassification.MiscTests; import org.apache.hadoop.hbase.testclassification.SmallTests; @@ -30,6 +29,7 @@ import org.junit.experimental.categories.Category; @Category({MiscTests.class, SmallTests.class}) public class TestCellComparator { + private CellComparator comparator = new CellComparator(); byte[] row1 = Bytes.toBytes("row1"); byte[] row2 = Bytes.toBytes("row2"); byte[] row_1_0 = Bytes.toBytes("row10"); @@ -47,98 +47,98 @@ public class TestCellComparator { public void testCompareCells() { KeyValue kv1 = new KeyValue(row1, fam1, qual1, val); KeyValue kv2 = new KeyValue(row2, fam1, qual1, val); - assertTrue((CellComparator.compare(kv1, kv2, false)) < 0); + assertTrue((comparator.compare(kv1, kv2, false)) < 0); kv1 = new KeyValue(row1, fam2, qual1, val); kv2 = new KeyValue(row1, fam1, qual1, val); - assertTrue((CellComparator.compareFamilies(kv1, kv2) > 0)); + 
assertTrue((comparator.compareFamilies(kv1, kv2) > 0)); kv1 = new KeyValue(row1, fam1, qual1, 1l, val); kv2 = new KeyValue(row1, fam1, qual1, 2l, val); - assertTrue((CellComparator.compare(kv1, kv2, false) > 0)); + assertTrue((comparator.compare(kv1, kv2, false) > 0)); kv1 = new KeyValue(row1, fam1, qual1, 1l, Type.Put); kv2 = new KeyValue(row1, fam1, qual1, 1l, Type.Maximum); - assertTrue((CellComparator.compare(kv1, kv2, false) > 0)); + assertTrue((comparator.compare(kv1, kv2, false) > 0)); kv1 = new KeyValue(row1, fam1, qual1, 1l, Type.Put); kv2 = new KeyValue(row1, fam_1_2, qual1, 1l, Type.Maximum); - assertTrue((CellComparator.compareCommonFamilyPrefix(kv1, kv2, 4) < 0)); + assertTrue((comparator.compareCommonFamilyPrefix(kv1, kv2, 4) < 0)); kv1 = new KeyValue(row1, fam1, qual1, 1l, Type.Put); kv2 = new KeyValue(row_1_0, fam_1_2, qual1, 1l, Type.Maximum); - assertTrue((CellComparator.compareCommonRowPrefix(kv1, kv2, 4) < 0)); + assertTrue((comparator.compareCommonRowPrefix(kv1, kv2, 4) < 0)); kv1 = new KeyValue(row1, fam1, qual2, 1l, Type.Put); kv2 = new KeyValue(row1, fam1, qual1, 1l, Type.Maximum); - assertTrue((CellComparator.compareCommonQualifierPrefix(kv1, kv2, 4) > 0)); + assertTrue((comparator.compareCommonQualifierPrefix(kv1, kv2, 4) > 0)); kv1 = new KeyValue(row1, fam1, qual1, 1l, Type.Put); kv2 = new KeyValue(row1, fam1, qual1, 1l, Type.Put); - assertTrue((CellComparator.equals(kv1, kv2))); + assertTrue((CellUtil.equals(kv1, kv2))); } @Test public void testGetShortMidpoint() { - KeyValue.KVComparator comparator = new KeyValue.KVComparator(); + CellComparator comparator = new CellComparator(); Cell left = CellUtil.createCell(Bytes.toBytes("a"), Bytes.toBytes("a"), Bytes.toBytes("a")); Cell right = CellUtil.createCell(Bytes.toBytes("a"), Bytes.toBytes("a"), Bytes.toBytes("a")); Cell mid = CellComparator.getMidpoint(comparator, left, right); - assertTrue(CellComparator.compare(left, mid, true) <= 0); - assertTrue(CellComparator.compare(mid, right, 
true) <= 0); + assertTrue(comparator.compare(left, mid, true) <= 0); + assertTrue(comparator.compare(mid, right, true) <= 0); left = CellUtil.createCell(Bytes.toBytes("a"), Bytes.toBytes("a"), Bytes.toBytes("a")); right = CellUtil.createCell(Bytes.toBytes("b"), Bytes.toBytes("a"), Bytes.toBytes("a")); mid = CellComparator.getMidpoint(comparator, left, right); - assertTrue(CellComparator.compare(left, mid, true) < 0); - assertTrue(CellComparator.compare(mid, right, true) <= 0); + assertTrue(comparator.compare(left, mid, true) < 0); + assertTrue(comparator.compare(mid, right, true) <= 0); left = CellUtil.createCell(Bytes.toBytes("g"), Bytes.toBytes("a"), Bytes.toBytes("a")); right = CellUtil.createCell(Bytes.toBytes("i"), Bytes.toBytes("a"), Bytes.toBytes("a")); mid = CellComparator.getMidpoint(comparator, left, right); - assertTrue(CellComparator.compare(left, mid, true) < 0); - assertTrue(CellComparator.compare(mid, right, true) <= 0); + assertTrue(comparator.compare(left, mid, true) < 0); + assertTrue(comparator.compare(mid, right, true) <= 0); left = CellUtil.createCell(Bytes.toBytes("a"), Bytes.toBytes("a"), Bytes.toBytes("a")); right = CellUtil.createCell(Bytes.toBytes("bbbbbbb"), Bytes.toBytes("a"), Bytes.toBytes("a")); mid = CellComparator.getMidpoint(comparator, left, right); - assertTrue(CellComparator.compare(left, mid, true) < 0); - assertTrue(CellComparator.compare(mid, right, true) < 0); + assertTrue(comparator.compare(left, mid, true) < 0); + assertTrue(comparator.compare(mid, right, true) < 0); assertEquals(1, (int)mid.getRowLength()); left = CellUtil.createCell(Bytes.toBytes("a"), Bytes.toBytes("a"), Bytes.toBytes("a")); right = CellUtil.createCell(Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("a")); mid = CellComparator.getMidpoint(comparator, left, right); - assertTrue(CellComparator.compare(left, mid, true) < 0); - assertTrue(CellComparator.compare(mid, right, true) <= 0); + assertTrue(comparator.compare(left, mid, true) < 0); + 
assertTrue(comparator.compare(mid, right, true) <= 0); left = CellUtil.createCell(Bytes.toBytes("a"), Bytes.toBytes("a"), Bytes.toBytes("a")); right = CellUtil.createCell(Bytes.toBytes("a"), Bytes.toBytes("aaaaaaaa"), Bytes.toBytes("b")); mid = CellComparator.getMidpoint(comparator, left, right); - assertTrue(CellComparator.compare(left, mid, true) < 0); - assertTrue(CellComparator.compare(mid, right, true) < 0); + assertTrue(comparator.compare(left, mid, true) < 0); + assertTrue(comparator.compare(mid, right, true) < 0); assertEquals(2, (int)mid.getFamilyLength()); left = CellUtil.createCell(Bytes.toBytes("a"), Bytes.toBytes("a"), Bytes.toBytes("a")); right = CellUtil.createCell(Bytes.toBytes("a"), Bytes.toBytes("a"), Bytes.toBytes("aaaaaaaaa")); mid = CellComparator.getMidpoint(comparator, left, right); - assertTrue(CellComparator.compare(left, mid, true) < 0); - assertTrue(CellComparator.compare(mid, right, true) < 0); + assertTrue(comparator.compare(left, mid, true) < 0); + assertTrue(comparator.compare(mid, right, true) < 0); assertEquals(2, (int)mid.getQualifierLength()); left = CellUtil.createCell(Bytes.toBytes("a"), Bytes.toBytes("a"), Bytes.toBytes("a")); right = CellUtil.createCell(Bytes.toBytes("a"), Bytes.toBytes("a"), Bytes.toBytes("b")); mid = CellComparator.getMidpoint(comparator, left, right); - assertTrue(CellComparator.compare(left, mid, true) < 0); - assertTrue(CellComparator.compare(mid, right, true) <= 0); + assertTrue(comparator.compare(left, mid, true) < 0); + assertTrue(comparator.compare(mid, right, true) <= 0); assertEquals(1, (int)mid.getQualifierLength()); // Assert that if meta comparator, it returns the right cell -- i.e. no optimization done. 
left = CellUtil.createCell(Bytes.toBytes("g"), Bytes.toBytes("a"), Bytes.toBytes("a")); right = CellUtil.createCell(Bytes.toBytes("i"), Bytes.toBytes("a"), Bytes.toBytes("a")); - mid = CellComparator.getMidpoint(new KeyValue.MetaComparator(), left, right); - assertTrue(CellComparator.compare(left, mid, true) < 0); - assertTrue(CellComparator.compare(mid, right, true) == 0); + mid = CellComparator.getMidpoint(KeyValue.META_COMPARATOR, left, right); + assertTrue(comparator.compare(left, mid, true) < 0); + assertTrue(comparator.compare(mid, right, true) == 0); } } \ No newline at end of file diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestKeyValue.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestKeyValue.java index 5daeefb..f602110 100644 --- a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestKeyValue.java +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestKeyValue.java @@ -18,6 +18,10 @@ */ package org.apache.hadoop.hbase; +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.DataInputStream; +import java.io.DataOutputStream; import java.io.IOException; import java.util.Arrays; import java.util.Collections; @@ -30,8 +34,7 @@ import junit.framework.TestCase; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.KeyValue.KVComparator; -import org.apache.hadoop.hbase.KeyValue.MetaComparator; +import org.apache.hadoop.hbase.CellComparator.MetaCellComparator; import org.apache.hadoop.hbase.KeyValue.Type; import org.apache.hadoop.hbase.util.Bytes; @@ -104,7 +107,7 @@ public class TestKeyValue extends TestCase { final byte [] fam = Bytes.toBytes("col"); final byte [] qf = Bytes.toBytes("umn"); KeyValue aaa = new KeyValue(a, fam, qf, a); - KeyValue bbb = new KeyValue(b, fam, qf, b); + KeyValue bbb = new KeyValue(b, fam, qf, b); assertTrue(KeyValue.COMPARATOR.compare(aaa, bbb) < 0); assertTrue(KeyValue.COMPARATOR.compare(bbb, 
aaa) > 0); // Compare breaks if passed same ByteBuffer as both left and right arguments. @@ -133,7 +136,7 @@ public class TestKeyValue extends TestCase { Bytes.toBytes("TestScanMultipleVersions,row_0500,1236020145502"), now); KeyValue bbb = new KeyValue( Bytes.toBytes("TestScanMultipleVersions,,99999999999999"), now); - KVComparator c = new KeyValue.MetaComparator(); + CellComparator c = KeyValue.META_COMPARATOR; assertTrue(c.compare(bbb, aaa) < 0); KeyValue aaaa = new KeyValue(Bytes.toBytes("TestScanMultipleVersions,,1236023996656"), @@ -148,13 +151,13 @@ public class TestKeyValue extends TestCase { Bytes.toBytes("info"), Bytes.toBytes("regioninfo"), 1236034574912L, (byte[])null); assertTrue(c.compare(x, y) < 0); - comparisons(new KeyValue.MetaComparator()); - comparisons(new KeyValue.KVComparator()); - metacomparisons(new KeyValue.MetaComparator()); + comparisons(KeyValue.META_COMPARATOR); + comparisons(KeyValue.COMPARATOR); + metacomparisons(KeyValue.META_COMPARATOR); } public void testMetaComparatorTableKeysWithCommaOk() { - MetaComparator c = new KeyValue.MetaComparator(); + CellComparator c = KeyValue.META_COMPARATOR; long now = System.currentTimeMillis(); // meta keys values are not quite right. A users can enter illegal values // from shell when scanning meta. 
@@ -185,7 +188,7 @@ public class TestKeyValue extends TestCase { } - private void metacomparisons(final KeyValue.MetaComparator c) { + private void metacomparisons(final CellComparator c) { long now = System.currentTimeMillis(); assertTrue(c.compare(new KeyValue( Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString()+",a,,0,1"), now), @@ -202,7 +205,7 @@ public class TestKeyValue extends TestCase { Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString()+",a,,0,1"), now)) > 0); } - private void comparisons(final KeyValue.KVComparator c) { + private void comparisons(final CellComparator c) { long now = System.currentTimeMillis(); assertTrue(c.compare(new KeyValue( Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString()+",,1"), now), @@ -245,7 +248,7 @@ public class TestKeyValue extends TestCase { } assertTrue(assertion); // Make set with good comparator - set = new TreeSet(new KeyValue.MetaComparator()); + set = new TreeSet(KeyValue.META_COMPARATOR); Collections.addAll(set, keys); count = 0; for (KeyValue k: set) { @@ -267,7 +270,7 @@ public class TestKeyValue extends TestCase { private final byte[] qualA = Bytes.toBytes("qfA"); private final byte[] qualB = Bytes.toBytes("qfB"); - private void assertKVLess(KeyValue.KVComparator c, + private void assertKVLess(CellComparator c, KeyValue less, KeyValue greater) { int cmp = c.compare(less,greater); @@ -276,20 +279,20 @@ public class TestKeyValue extends TestCase { assertTrue(cmp > 0); } - private void assertKVLessWithoutRow(KeyValue.KVComparator c, int common, KeyValue less, + private void assertKVLessWithoutRow(CellComparator c, int common, KeyValue less, KeyValue greater) { - int cmp = c.compareIgnoringPrefix(common, less.getBuffer(), less.getOffset() + int cmp = KeyValueUtil.compareIgnoringPrefix(common, less.getBuffer(), less.getOffset() + KeyValue.ROW_OFFSET, less.getKeyLength(), greater.getBuffer(), greater.getOffset() + KeyValue.ROW_OFFSET, greater.getKeyLength()); assertTrue(cmp < 0); - cmp = 
c.compareIgnoringPrefix(common, greater.getBuffer(), greater.getOffset() + cmp = KeyValueUtil.compareIgnoringPrefix(common, greater.getBuffer(), greater.getOffset() + KeyValue.ROW_OFFSET, greater.getKeyLength(), less.getBuffer(), less.getOffset() + KeyValue.ROW_OFFSET, less.getKeyLength()); assertTrue(cmp > 0); } public void testCompareWithoutRow() { - final KeyValue.KVComparator c = KeyValue.COMPARATOR; + final CellComparator c = KeyValue.COMPARATOR; byte[] row = Bytes.toBytes("row"); byte[] fa = Bytes.toBytes("fa"); @@ -336,7 +339,7 @@ public class TestKeyValue extends TestCase { } public void testFirstLastOnRow() { - final KVComparator c = KeyValue.COMPARATOR; + final CellComparator c = KeyValue.COMPARATOR; long ts = 1; byte[] bufferA = new byte[128]; int offsetA = 0; @@ -410,7 +413,7 @@ public class TestKeyValue extends TestCase { byte[] tmpArr = new byte[initialPadding + endingPadding + keyLen]; System.arraycopy(kv.getBuffer(), kv.getKeyOffset(), tmpArr, initialPadding, keyLen); - KeyValue kvFromKey = KeyValue.createKeyValueFromKey(tmpArr, initialPadding, + KeyValue kvFromKey = KeyValueUtil.createKeyValueFromKey(tmpArr, initialPadding, keyLen); assertEquals(keyLen, kvFromKey.getKeyLength()); assertEquals(KeyValue.ROW_OFFSET + keyLen, kvFromKey.getBuffer().length); @@ -439,7 +442,7 @@ public class TestKeyValue extends TestCase { * See HBASE-7845 */ public void testGetShortMidpointKey() { - final KVComparator keyComparator = KeyValue.COMPARATOR; + final CellComparator keyComparator = KeyValue.COMPARATOR; //verify that faked shorter rowkey could be generated long ts = 5; KeyValue kv1 = new KeyValue(Bytes.toBytes("the quick brown fox"), family, qualA, ts, Type.Put); @@ -473,14 +476,14 @@ public class TestKeyValue extends TestCase { newKey = keyComparator.getShortMidpointKey(kv1.getKey(), kv2.getKey()); assertTrue(keyComparator.compareFlatKey(kv1.getKey(), newKey) < 0); assertTrue(keyComparator.compareFlatKey(newKey, kv2.getKey()) < 0); - KeyValue newKeyValue = 
KeyValue.createKeyValueFromKey(newKey); + KeyValue newKeyValue = KeyValueUtil.createKeyValueFromKey(newKey); assertTrue(Arrays.equals(newKeyValue.getFamily(),family)); assertTrue(Arrays.equals(newKeyValue.getQualifier(),qualB)); assertTrue(newKeyValue.getTimestamp() == HConstants.LATEST_TIMESTAMP); assertTrue(newKeyValue.getTypeByte() == Type.Maximum.getCode()); //verify metaKeyComparator's getShortMidpointKey output - final KVComparator metaKeyComparator = KeyValue.META_COMPARATOR; + final CellComparator metaKeyComparator = KeyValue.META_COMPARATOR; kv1 = new KeyValue(Bytes.toBytes("ilovehbase123"), family, qualA, 5, Type.Put); kv2 = new KeyValue(Bytes.toBytes("ilovehbase234"), family, qualA, 0, Type.Put); newKey = metaKeyComparator.getShortMidpointKey(kv1.getKey(), kv2.getKey()); @@ -573,7 +576,7 @@ public class TestKeyValue extends TestCase { } public void testMetaKeyComparator() { - MetaComparator c = new KeyValue.MetaComparator(); + CellComparator c = KeyValue.META_COMPARATOR; long now = System.currentTimeMillis(); KeyValue a = new KeyValue(Bytes.toBytes("table1"), now); diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/codec/TestCellCodec.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/codec/TestCellCodec.java index 922de6f..1ce4bf6 100644 --- a/hbase-common/src/test/java/org/apache/hadoop/hbase/codec/TestCellCodec.java +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/codec/TestCellCodec.java @@ -29,6 +29,7 @@ import java.io.IOException; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; +import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.testclassification.MiscTests; import org.apache.hadoop.hbase.testclassification.SmallTests; @@ -112,13 +113,13 @@ public class TestCellCodec { Codec.Decoder decoder = codec.getDecoder(dis); assertTrue(decoder.advance()); Cell c = decoder.current(); - assertTrue(CellComparator.equals(c, kv1)); + 
assertTrue(CellUtil.equals(c, kv1)); assertTrue(decoder.advance()); c = decoder.current(); - assertTrue(CellComparator.equals(c, kv2)); + assertTrue(CellUtil.equals(c, kv2)); assertTrue(decoder.advance()); c = decoder.current(); - assertTrue(CellComparator.equals(c, kv3)); + assertTrue(CellUtil.equals(c, kv3)); assertFalse(decoder.advance()); dis.close(); assertEquals(offset, cis.getCount()); diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/codec/TestCellCodecWithTags.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/codec/TestCellCodecWithTags.java index 30f2f00..beff87a 100644 --- a/hbase-common/src/test/java/org/apache/hadoop/hbase/codec/TestCellCodecWithTags.java +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/codec/TestCellCodecWithTags.java @@ -29,7 +29,7 @@ import java.io.IOException; import java.util.List; import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.CellComparator; +import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.Tag; @@ -76,7 +76,7 @@ public class TestCellCodecWithTags { Codec.Decoder decoder = codec.getDecoder(dis); assertTrue(decoder.advance()); Cell c = decoder.current(); - assertTrue(CellComparator.equals(c, cell1)); + assertTrue(CellUtil.equals(c, cell1)); List tags = Tag.asList(c.getTagsArray(), c.getTagsOffset(), c.getTagsLength()); assertEquals(2, tags.size()); Tag tag = tags.get(0); @@ -87,7 +87,7 @@ public class TestCellCodecWithTags { assertTrue(Bytes.equals(Bytes.toBytes("teststring2"), tag.getValue())); assertTrue(decoder.advance()); c = decoder.current(); - assertTrue(CellComparator.equals(c, cell2)); + assertTrue(CellUtil.equals(c, cell2)); tags = Tag.asList(c.getTagsArray(), c.getTagsOffset(), c.getTagsLength()); assertEquals(1, tags.size()); tag = tags.get(0); @@ -95,7 +95,7 @@ public class TestCellCodecWithTags { assertTrue(Bytes.equals(Bytes.toBytes("teststring3"), 
tag.getValue())); assertTrue(decoder.advance()); c = decoder.current(); - assertTrue(CellComparator.equals(c, cell3)); + assertTrue(CellUtil.equals(c, cell3)); tags = Tag.asList(c.getTagsArray(), c.getTagsOffset(), c.getTagsLength()); assertEquals(3, tags.size()); tag = tags.get(0); diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/codec/TestKeyValueCodecWithTags.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/codec/TestKeyValueCodecWithTags.java index 007647a..04fb9a9 100644 --- a/hbase-common/src/test/java/org/apache/hadoop/hbase/codec/TestKeyValueCodecWithTags.java +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/codec/TestKeyValueCodecWithTags.java @@ -29,7 +29,7 @@ import java.io.IOException; import java.util.List; import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.CellComparator; +import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.Tag; @@ -76,7 +76,7 @@ public class TestKeyValueCodecWithTags { Codec.Decoder decoder = codec.getDecoder(dis); assertTrue(decoder.advance()); Cell c = decoder.current(); - assertTrue(CellComparator.equals(c, kv1)); + assertTrue(CellUtil.equals(c, kv1)); List tags = Tag.asList(c.getTagsArray(), c.getTagsOffset(), c.getTagsLength()); assertEquals(2, tags.size()); Tag tag = tags.get(0); @@ -87,7 +87,7 @@ public class TestKeyValueCodecWithTags { assertTrue(Bytes.equals(Bytes.toBytes("teststring2"), tag.getValue())); assertTrue(decoder.advance()); c = decoder.current(); - assertTrue(CellComparator.equals(c, kv2)); + assertTrue(CellUtil.equals(c, kv2)); tags = Tag.asList(c.getTagsArray(), c.getTagsOffset(), c.getTagsLength()); assertEquals(1, tags.size()); tag = tags.get(0); @@ -95,7 +95,7 @@ public class TestKeyValueCodecWithTags { assertTrue(Bytes.equals(Bytes.toBytes("teststring3"), tag.getValue())); assertTrue(decoder.advance()); c = decoder.current(); - 
assertTrue(CellComparator.equals(c, kv3)); + assertTrue(CellUtil.equals(c, kv3)); tags = Tag.asList(c.getTagsArray(), c.getTagsOffset(), c.getTagsLength()); assertEquals(3, tags.size()); tag = tags.get(0); diff --git a/hbase-examples/src/test/java/org/apache/hadoop/hbase/types/TestPBCell.java b/hbase-examples/src/test/java/org/apache/hadoop/hbase/types/TestPBCell.java index a548b8a..e6f1b3e 100644 --- a/hbase-examples/src/test/java/org/apache/hadoop/hbase/types/TestPBCell.java +++ b/hbase-examples/src/test/java/org/apache/hadoop/hbase/types/TestPBCell.java @@ -22,6 +22,7 @@ import static org.junit.Assert.assertTrue; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; +import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.testclassification.MiscTests; import org.apache.hadoop.hbase.testclassification.SmallTests; @@ -52,6 +53,6 @@ public class TestPBCell { pbr.setPosition(0); decoded = CODEC.decode(pbr); assertEquals(encodedLength, pbr.getPosition()); - assertTrue(CellComparator.equals(cell, ProtobufUtil.toCell(decoded))); + assertTrue(CellUtil.equals(cell, ProtobufUtil.toCell(decoded))); } } diff --git a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/PrefixTreeCodec.java b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/PrefixTreeCodec.java index afcb526..71f053d 100644 --- a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/PrefixTreeCodec.java +++ b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/PrefixTreeCodec.java @@ -25,9 +25,9 @@ import java.nio.ByteBuffer; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.KeyValue.KVComparator; -import org.apache.hadoop.hbase.KeyValue.MetaComparator; +import 
org.apache.hadoop.hbase.CellComparator.MetaCellComparator; import org.apache.hadoop.hbase.KeyValue.RawBytesComparator; import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.codec.prefixtree.decode.DecoderFactory; @@ -58,7 +58,7 @@ import org.apache.hadoop.io.WritableUtils; * created and recycled by static PtEncoderFactory and PtDecoderFactory. */ @InterfaceAudience.Private -public class PrefixTreeCodec implements DataBlockEncoder{ +public class PrefixTreeCodec implements DataBlockEncoder { /** * no-arg constructor for reflection @@ -150,10 +150,11 @@ public class PrefixTreeCodec implements DataBlockEncoder{ * the way to this point. */ @Override - public EncodedSeeker createSeeker(KVComparator comparator, HFileBlockDecodingContext decodingCtx) { + public EncodedSeeker createSeeker(CellComparator comparator, + HFileBlockDecodingContext decodingCtx) { if (comparator instanceof RawBytesComparator){ - throw new IllegalArgumentException("comparator must be KeyValue.KeyComparator"); - } else if (comparator instanceof MetaComparator){ + throw new IllegalArgumentException("comparator must be CellComparator"); + } else if (comparator instanceof MetaCellComparator){ throw new IllegalArgumentException("DataBlockEncoding.PREFIX_TREE not compatible with hbase:meta " +"table"); } diff --git a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/PrefixTreeSeeker.java b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/PrefixTreeSeeker.java index b95055c..a2b347c 100644 --- a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/PrefixTreeSeeker.java +++ b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/PrefixTreeSeeker.java @@ -21,9 +21,9 @@ package org.apache.hadoop.hbase.codec.prefixtree; import java.nio.ByteBuffer; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; import 
org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.KeyValue.KVComparator; import org.apache.hadoop.hbase.KeyValue.Type; import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.SettableSequenceId; @@ -242,10 +242,8 @@ public class PrefixTreeSeeker implements EncodedSeeker { } @Override - public int compareKey(KVComparator comparator, byte[] key, int offset, int length) { - // can't optimize this, make a copy of the key - ByteBuffer bb = getKeyDeepCopy(); - return comparator.compareFlatKey(key, offset, length, bb.array(), bb.arrayOffset(), bb.limit()); + public int compareKey(CellComparator comparator, byte[] key, int offset, int length) { + return compareKey(comparator, new KeyValue.KeyOnlyKeyValue(key, offset, length)); } @Override @@ -258,7 +256,8 @@ public class PrefixTreeSeeker implements EncodedSeeker { } @Override - public int compareKey(KVComparator comparator, Cell key) { + public int compareKey(CellComparator comparator, Cell key) { + // can't optimize this, make a copy of the key ByteBuffer bb = getKeyDeepCopy(); return comparator.compare(key, new KeyValue.KeyOnlyKeyValue(bb.array(), bb.arrayOffset(), bb.limit())); diff --git a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/decode/PrefixTreeArrayScanner.java b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/decode/PrefixTreeArrayScanner.java index cb7eeea..b5d64f5 100644 --- a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/decode/PrefixTreeArrayScanner.java +++ b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/decode/PrefixTreeArrayScanner.java @@ -61,7 +61,6 @@ public class PrefixTreeArrayScanner extends PrefixTreeCell implements CellScanne protected boolean nubCellsRemain; protected int currentCellIndex; - /*********************** construct ******************************/ // pass in blockMeta so we can initialize buffers big enough for all cells in the block @@ -420,7 
+419,7 @@ public class PrefixTreeArrayScanner extends PrefixTreeCell implements CellScanne protected int populateNonRowFieldsAndCompareTo(int cellNum, Cell key) { populateNonRowFields(cellNum); - return CellComparator.compare(this, key, true); + return comparator.compareOnlyKeyPortion(this, key); } protected void populateFirstNonRowFields() { diff --git a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/decode/PrefixTreeCell.java b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/decode/PrefixTreeCell.java index 97eed62..92b5920 100644 --- a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/decode/PrefixTreeCell.java +++ b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/decode/PrefixTreeCell.java @@ -33,6 +33,8 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience; */ @InterfaceAudience.Private public class PrefixTreeCell implements Cell, SettableSequenceId, Comparable { + // Create a reference here? Can be removed too + protected CellComparator comparator = KeyValue.COMPARATOR; /********************** static **********************/ @@ -91,7 +93,7 @@ public class PrefixTreeCell implements Cell, SettableSequenceId, Comparable metaNames = new ArrayList(); @@ -110,7 +110,7 @@ public abstract class AbstractHFileWriter implements HFile.Writer { public AbstractHFileWriter(CacheConfig cacheConf, FSDataOutputStream outputStream, Path path, - KVComparator comparator, HFileContext fileContext) { + CellComparator comparator, HFileContext fileContext) { this.outputStream = outputStream; this.path = path; this.name = path != null ? 
path.getName() : outputStream.toString(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FixedFileTrailer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FixedFileTrailer.java index 56510f0..f02be4b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FixedFileTrailer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FixedFileTrailer.java @@ -29,8 +29,8 @@ import java.nio.ByteBuffer; import org.apache.hadoop.hbase.util.ByteStringer; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.KeyValue.KVComparator; import org.apache.hadoop.hbase.io.compress.Compression; import org.apache.hadoop.hbase.protobuf.generated.HFileProtos; import org.apache.hadoop.hbase.util.Bytes; @@ -107,7 +107,8 @@ public class FixedFileTrailer { private long lastDataBlockOffset; /** Raw key comparator class name in version 3 */ - private String comparatorClassName = KeyValue.COMPARATOR.getLegacyKeyComparatorName(); + private String comparatorClassName = (KeyValue.COMPARATOR) + .getLegacyKeyComparatorName(); /** The encryption key */ private byte[] encryptionKey; @@ -536,16 +537,18 @@ public class FixedFileTrailer { return minorVersion; } - public void setComparatorClass(Class klass) { + public void setComparatorClass(Class klass) { // Is the comparator instantiable? try { - KVComparator comp = klass.newInstance(); + CellComparator comp = klass.newInstance(); // HFile V2 legacy comparator class names. 
if (KeyValue.COMPARATOR.getClass().equals(klass)) { - comparatorClassName = KeyValue.COMPARATOR.getLegacyKeyComparatorName(); + comparatorClassName = (KeyValue.COMPARATOR) + .getLegacyKeyComparatorName(); } else if (KeyValue.META_COMPARATOR.getClass().equals(klass)) { - comparatorClassName = KeyValue.META_COMPARATOR.getLegacyKeyComparatorName(); + comparatorClassName = (KeyValue.META_COMPARATOR) + .getLegacyKeyComparatorName(); } else if (KeyValue.RAW_COMPARATOR.getClass().equals(klass)) { comparatorClassName = KeyValue.RAW_COMPARATOR.getLegacyKeyComparatorName(); } else { @@ -561,13 +564,15 @@ public class FixedFileTrailer { } @SuppressWarnings("unchecked") - private static Class getComparatorClass( + private static Class getComparatorClass( String comparatorClassName) throws IOException { try { // HFile V2 legacy comparator class names. - if (comparatorClassName.equals(KeyValue.COMPARATOR.getLegacyKeyComparatorName())) { + if (comparatorClassName.equals((KeyValue.COMPARATOR) + .getLegacyKeyComparatorName())) { comparatorClassName = KeyValue.COMPARATOR.getClass().getName(); - } else if (comparatorClassName.equals(KeyValue.META_COMPARATOR.getLegacyKeyComparatorName())) { + } else if (comparatorClassName.equals((KeyValue.META_COMPARATOR) + .getLegacyKeyComparatorName())) { comparatorClassName = KeyValue.META_COMPARATOR.getClass().getName(); } else if (comparatorClassName.equals(KeyValue.RAW_COMPARATOR.getLegacyKeyComparatorName())) { comparatorClassName = KeyValue.RAW_COMPARATOR.getClass().getName(); @@ -575,14 +580,14 @@ public class FixedFileTrailer { // if the name wasn't one of the legacy names, maybe its a legit new kind of comparator. 
- return (Class) + return (Class) Class.forName(comparatorClassName); } catch (ClassNotFoundException ex) { throw new IOException(ex); } } - public static KVComparator createComparator( + public static CellComparator createComparator( String comparatorClassName) throws IOException { try { return getComparatorClass(comparatorClassName).newInstance(); @@ -595,7 +600,7 @@ public class FixedFileTrailer { } } - KVComparator createComparator() throws IOException { + CellComparator createComparator() throws IOException { expectAtLeastMajorVersion(2); return createComparator(comparatorClassName); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java index 610fe7f..da4932b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java @@ -35,8 +35,12 @@ import java.util.Map; import java.util.Set; import java.util.SortedMap; import java.util.TreeMap; +import java.util.concurrent.ArrayBlockingQueue; +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; +import org.apache.hadoop.hbase.util.ByteStringer; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.classification.InterfaceAudience; @@ -48,9 +52,9 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathFilter; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.KeyValue.KVComparator; import org.apache.hadoop.hbase.fs.HFileSystem; import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper; import org.apache.hadoop.hbase.io.compress.Compression; @@ -61,13 +65,13 @@ import 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair; import org.apache.hadoop.hbase.protobuf.generated.HFileProtos; import org.apache.hadoop.hbase.util.BloomFilterWriter; -import org.apache.hadoop.hbase.util.ByteStringer; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.ChecksumType; import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.io.Writable; import com.google.common.base.Preconditions; +import com.google.common.collect.Lists; /** * File format for hbase. @@ -248,7 +252,8 @@ public class HFile { protected FileSystem fs; protected Path path; protected FSDataOutputStream ostream; - protected KVComparator comparator = KeyValue.COMPARATOR; + protected CellComparator comparator = + KeyValue.COMPARATOR; protected InetSocketAddress[] favoredNodes; private HFileContext fileContext; @@ -271,7 +276,7 @@ public class HFile { return this; } - public WriterFactory withComparator(KVComparator comparator) { + public WriterFactory withComparator(CellComparator comparator) { Preconditions.checkNotNull(comparator); this.comparator = comparator; return this; @@ -301,7 +306,7 @@ public class HFile { } protected abstract Writer createWriter(FileSystem fs, Path path, FSDataOutputStream ostream, - KVComparator comparator, HFileContext fileContext) throws IOException; + CellComparator comparator, HFileContext fileContext) throws IOException; } /** The configuration key for HFile version to use for new files */ @@ -381,7 +386,7 @@ public class HFile { */ String getName(); - KVComparator getComparator(); + CellComparator getComparator(); HFileScanner getScanner(boolean cacheBlocks, final boolean pread, final boolean isCompaction); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java index 77266df..7437460 100644 --- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java @@ -36,9 +36,9 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.KeyValue.KVComparator; import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.io.HeapSize; import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; @@ -107,7 +107,7 @@ public class HFileBlockIndex { */ public static class BlockIndexReader implements HeapSize { /** Needed doing lookup on blocks. */ - private final KVComparator comparator; + private final CellComparator comparator; // Root-level data. private byte[][] blockKeys; @@ -132,13 +132,13 @@ public class HFileBlockIndex { /** A way to read {@link HFile} blocks at a given offset */ private CachingBlockReader cachingBlockReader; - public BlockIndexReader(final KVComparator c, final int treeLevel, + public BlockIndexReader(final CellComparator c, final int treeLevel, final CachingBlockReader cachingBlockReader) { this(c, treeLevel); this.cachingBlockReader = cachingBlockReader; } - public BlockIndexReader(final KVComparator c, final int treeLevel) + public BlockIndexReader(final CellComparator c, final int treeLevel) { comparator = c; searchTreeLevel = treeLevel; @@ -507,7 +507,7 @@ public class HFileBlockIndex { * @throws IOException */ static int binarySearchNonRootIndex(Cell key, ByteBuffer nonRootIndex, - KVComparator comparator) { + CellComparator comparator) { int numEntries = nonRootIndex.getInt(0); int low = 0; @@ -596,7 +596,7 @@ public class HFileBlockIndex { * */ static int locateNonRootIndexEntry(ByteBuffer nonRootBlock, Cell key, - 
KVComparator comparator) { + CellComparator comparator) { int entryIndex = binarySearchNonRootIndex(key, nonRootBlock, comparator); if (entryIndex != -1) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java index 7b92df9..ba8ee4b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java @@ -317,7 +317,7 @@ public class HFilePrettyPrinter extends Configured implements Tool { do { Cell cell = scanner.getKeyValue(); if (row != null && row.length != 0) { - int result = CellComparator.compareRows(cell.getRowArray(), cell.getRowOffset(), + int result = KeyValue.COMPARATOR.compareRows(cell, cell.getRowOffset(), cell.getRowLength(), row, 0, row.length); if (result > 0) { break; @@ -348,7 +348,7 @@ public class HFilePrettyPrinter extends Configured implements Tool { } // check if rows are in order if (checkRow && pCell != null) { - if (CellComparator.compareRows(pCell, cell) > 0) { + if (KeyValue.COMPARATOR.compareRows(pCell, cell) > 0) { System.err.println("WARNING, previous row is greater then" + " current row\n\tfilename -> " + file + "\n\tprevious -> " + CellUtil.getCellKeyAsString(pCell) + "\n\tcurrent -> " @@ -364,7 +364,7 @@ public class HFilePrettyPrinter extends Configured implements Tool { + "\n\tfilename -> " + file + "\n\tkeyvalue -> " + CellUtil.getCellKeyAsString(cell)); } - if (pCell != null && CellComparator.compareFamilies(pCell, cell) != 0) { + if (pCell != null && KeyValue.COMPARATOR.compareFamilies(pCell, cell) != 0) { System.err.println("WARNING, previous kv has different family" + " compared to current key\n\tfilename -> " + file + "\n\tprevious -> " + CellUtil.getCellKeyAsString(pCell) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java index c0e3e91..f082b3f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java @@ -32,7 +32,7 @@ import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.KeyValue.KVComparator; +import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.fs.HFileSystem; import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper; import org.apache.hadoop.hbase.io.encoding.DataBlockEncoder; @@ -723,10 +723,10 @@ public class HFileReaderV2 extends AbstractHFileReader { * @param length * @return -1 is the passed key is smaller than the current key, 0 if equal and 1 if greater */ - public abstract int compareKey(KVComparator comparator, byte[] key, int offset, + public abstract int compareKey(CellComparator comparator, byte[] key, int offset, int length); - public abstract int compareKey(KVComparator comparator, Cell kv); + public abstract int compareKey(CellComparator comparator, Cell kv); } /** @@ -768,7 +768,7 @@ public class HFileReaderV2 extends AbstractHFileReader { } @Override - public int compareKey(KVComparator comparator, byte[] key, int offset, int length) { + public int compareKey(CellComparator comparator, byte[] key, int offset, int length) { return comparator.compareFlatKey(key, offset, length, blockBuffer.array(), blockBuffer.arrayOffset() + blockBuffer.position() + KEY_VALUE_LEN_SIZE, currKeyLen); } @@ -1072,7 +1072,7 @@ public class HFileReaderV2 extends AbstractHFileReader { } @Override - public int compareKey(KVComparator comparator, Cell key) { + public int compareKey(CellComparator comparator, Cell key) { return comparator.compareOnlyKeyPortion( key, new KeyValue.KeyOnlyKeyValue(blockBuffer.array(), 
blockBuffer.arrayOffset() @@ -1192,7 +1192,7 @@ public class HFileReaderV2 extends AbstractHFileReader { } @Override - public int compareKey(KVComparator comparator, byte[] key, int offset, int length) { + public int compareKey(CellComparator comparator, byte[] key, int offset, int length) { return seeker.compareKey(comparator, key, offset, length); } @@ -1248,7 +1248,7 @@ public class HFileReaderV2 extends AbstractHFileReader { } @Override - public int compareKey(KVComparator comparator, Cell key) { + public int compareKey(CellComparator comparator, Cell key) { return seeker.compareKey(comparator, key); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV2.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV2.java index 28c4655..4ef7116 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV2.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV2.java @@ -34,7 +34,6 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; -import org.apache.hadoop.hbase.KeyValue.KVComparator; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.io.hfile.HFile.Writer; import org.apache.hadoop.hbase.io.hfile.HFileBlock.BlockWritable; @@ -96,7 +95,7 @@ public class HFileWriterV2 extends AbstractHFileWriter { @Override public Writer createWriter(FileSystem fs, Path path, FSDataOutputStream ostream, - KVComparator comparator, HFileContext context) throws IOException { + CellComparator comparator, HFileContext context) throws IOException { context.setIncludesTags(false);// HFile V2 does not deal with tags at all! 
return new HFileWriterV2(conf, cacheConf, fs, path, ostream, comparator, context); @@ -106,7 +105,7 @@ public class HFileWriterV2 extends AbstractHFileWriter { /** Constructor that takes a path, creates and closes the output stream. */ public HFileWriterV2(Configuration conf, CacheConfig cacheConf, FileSystem fs, Path path, FSDataOutputStream ostream, - final KVComparator comparator, final HFileContext context) throws IOException { + final CellComparator comparator, final HFileContext context) throws IOException { super(cacheConf, ostream == null ? createOutputStream(conf, fs, path, null) : ostream, path, comparator, context); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV3.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV3.java index 086395c..8c22e33 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV3.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV3.java @@ -27,8 +27,8 @@ import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.KeyValue.KVComparator; import org.apache.hadoop.hbase.io.crypto.Encryption; import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; import org.apache.hadoop.hbase.io.hfile.HFile.FileInfo; @@ -54,7 +54,7 @@ public class HFileWriterV3 extends HFileWriterV2 { @Override public Writer createWriter(FileSystem fs, Path path, FSDataOutputStream ostream, - final KVComparator comparator, HFileContext fileContext) + final CellComparator comparator, HFileContext fileContext) throws IOException { return new HFileWriterV3(conf, cacheConf, fs, path, ostream, comparator, fileContext); } @@ -62,7 +62,7 @@ public class HFileWriterV3 extends HFileWriterV2 { /** Constructor that takes a path, 
creates and closes the output stream. */ public HFileWriterV3(Configuration conf, CacheConfig cacheConf, FileSystem fs, Path path, - FSDataOutputStream ostream, final KVComparator comparator, + FSDataOutputStream ostream, final CellComparator comparator, final HFileContext fileContext) throws IOException { super(conf, cacheConf, fs, path, ostream, comparator, fileContext); if (LOG.isTraceEnabled()) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/KeyValueSerialization.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/KeyValueSerialization.java index 192afd8..ea7d33a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/KeyValueSerialization.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/KeyValueSerialization.java @@ -24,6 +24,7 @@ import java.io.InputStream; import java.io.OutputStream; import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.io.serializer.Deserializer; import org.apache.hadoop.io.serializer.Serialization; import org.apache.hadoop.io.serializer.Serializer; @@ -55,7 +56,7 @@ public class KeyValueSerialization implements Serialization { @Override public KeyValue deserialize(KeyValue ignore) throws IOException { // I can't overwrite the passed in KV, not from a proto kv, not just yet. 
TODO - return KeyValue.create(this.dis); + return KeyValueUtil.create(this.dis); } @Override @@ -79,7 +80,7 @@ public class KeyValueSerialization implements Serialization { @Override public void serialize(KeyValue kv) throws IOException { - KeyValue.write(kv, this.dos); + KeyValueUtil.write(kv, this.dos); } } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellSkipListSet.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellSkipListSet.java index 4c3ab50..bfaced7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellSkipListSet.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellSkipListSet.java @@ -27,6 +27,7 @@ import java.util.concurrent.ConcurrentNavigableMap; import java.util.concurrent.ConcurrentSkipListMap; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.classification.InterfaceAudience; @@ -48,7 +49,7 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience; public class CellSkipListSet implements NavigableSet { private final ConcurrentNavigableMap delegatee; - CellSkipListSet(final KeyValue.KVComparator c) { + CellSkipListSet(final CellComparator c) { this.delegatee = new ConcurrentSkipListMap(c); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java index 3da0c0b..3d13cc2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java @@ -34,6 +34,7 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; +import 
org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HConstants; @@ -84,7 +85,7 @@ public class DefaultMemStore implements MemStore { // Snapshot of memstore. Made for flusher. volatile CellSkipListSet snapshot; - final KeyValue.KVComparator comparator; + final CellComparator comparator; // Used to track own heapSize final AtomicLong size; @@ -112,7 +113,7 @@ public class DefaultMemStore implements MemStore { * @param c Comparator */ public DefaultMemStore(final Configuration conf, - final KeyValue.KVComparator c) { + final CellComparator c) { this.conf = conf; this.comparator = c; this.cellSet = new CellSkipListSet(c); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreEngine.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreEngine.java index 3c1345d..519edde 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreEngine.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreEngine.java @@ -24,8 +24,8 @@ import java.util.List; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.HBaseInterfaceAudience; -import org.apache.hadoop.hbase.KeyValue.KVComparator; import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext; import org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy; import org.apache.hadoop.hbase.regionserver.compactions.RatioBasedCompactionPolicy; @@ -63,7 +63,7 @@ public class DefaultStoreEngine extends StoreEngine< @Override protected void createComponents( - Configuration conf, Store store, KVComparator kvComparator) throws IOException { + Configuration conf, Store store, CellComparator kvComparator) throws 
IOException { String className = conf.get(DEFAULT_COMPACTOR_CLASS_KEY, DEFAULT_COMPACTOR_CLASS.getName()); try { compactor = ReflectionUtils.instantiateWithCustomCtor(className, diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreFileManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreFileManager.java index 8305b99..2761e0a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreFileManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreFileManager.java @@ -29,8 +29,8 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.KeyValue.KVComparator; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.regionserver.compactions.CompactionConfiguration; @@ -45,7 +45,7 @@ import com.google.common.collect.Lists; class DefaultStoreFileManager implements StoreFileManager { static final Log LOG = LogFactory.getLog(DefaultStoreFileManager.class); - private final KVComparator kvComparator; + private final CellComparator kvComparator; private final CompactionConfiguration comConf; private final int blockingFileCount; @@ -55,7 +55,7 @@ class DefaultStoreFileManager implements StoreFileManager { */ private volatile ImmutableList storefiles = null; - public DefaultStoreFileManager(KVComparator kvComparator, Configuration conf, + public DefaultStoreFileManager(CellComparator kvComparator, Configuration conf, CompactionConfiguration comConf) { this.kvComparator = kvComparator; this.comConf = comConf; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/GetClosestRowBeforeTracker.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/GetClosestRowBeforeTracker.java index 4d22c0e..07ffe77 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/GetClosestRowBeforeTracker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/GetClosestRowBeforeTracker.java @@ -29,7 +29,7 @@ import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.KeyValue.KVComparator; +import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.util.Bytes; /** @@ -45,7 +45,7 @@ class GetClosestRowBeforeTracker { private final long now; private final long oldestUnexpiredTs; private Cell candidate = null; - private final KVComparator kvcomparator; + private final CellComparator cellComparator; // Flag for whether we're doing getclosest on a metaregion. private final boolean metaregion; // Offset and length into targetkey demarking table name (if in a metaregion). @@ -62,7 +62,7 @@ class GetClosestRowBeforeTracker { * @param ttl Time to live in ms for this Store * @param metaregion True if this is hbase:meta or -ROOT- region. */ - GetClosestRowBeforeTracker(final KVComparator c, final KeyValue kv, + GetClosestRowBeforeTracker(final CellComparator c, final KeyValue kv, final long ttl, final boolean metaregion) { super(); this.metaregion = metaregion; @@ -72,13 +72,13 @@ class GetClosestRowBeforeTracker { this.rowoffset = kv.getRowOffset(); int l = -1; if (metaregion) { - l = KeyValue.getDelimiter(kv.getRowArray(), rowoffset, kv.getRowLength(), + l = CellUtil.getDelimiter(kv.getRowArray(), rowoffset, kv.getRowLength(), HConstants.DELIMITER) - this.rowoffset; } this.tablenamePlusDelimiterLength = metaregion? 
l + 1: -1; this.now = System.currentTimeMillis(); this.oldestUnexpiredTs = now - ttl; - this.kvcomparator = c; + this.cellComparator = c; this.deletes = new TreeMap>(new CellComparator.RowComparator()); } @@ -89,7 +89,7 @@ class GetClosestRowBeforeTracker { private void addDelete(final Cell kv) { NavigableSet rowdeletes = this.deletes.get(kv); if (rowdeletes == null) { - rowdeletes = new TreeSet(this.kvcomparator); + rowdeletes = new TreeSet(this.cellComparator); this.deletes.put(kv, rowdeletes); } rowdeletes.add(kv); @@ -109,8 +109,8 @@ class GetClosestRowBeforeTracker { boolean isBetterCandidate(final Cell contender) { return this.candidate == null || - (this.kvcomparator.compareRows(this.candidate, contender) < 0 && - this.kvcomparator.compareRows(contender, this.targetkey) <= 0); + (this.cellComparator.compareRows(this.candidate, contender) < 0 && + this.cellComparator.compareRows(contender, this.targetkey) <= 0); } /* @@ -231,7 +231,7 @@ class GetClosestRowBeforeTracker { * @return True if we went too far, past the target key. 
*/ boolean isTooFar(final Cell kv, final Cell firstOnRow) { - return this.kvcomparator.compareRows(kv, firstOnRow) > 0; + return this.cellComparator.compareRows(kv, firstOnRow) > 0; } boolean isTargetTable(final Cell kv) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java index b8e3e52..95eea75 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java @@ -70,6 +70,7 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.CompoundConfiguration; @@ -288,7 +289,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi private final HRegionFileSystem fs; protected final Configuration conf; private final Configuration baseConf; - private final KeyValue.KVComparator comparator; + private final CellComparator comparator; private final int rowLockWaitDuration; static final int DEFAULT_ROWLOCK_WAIT_DURATION = 30000; @@ -1645,7 +1646,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi /** * @return KeyValue Comparator */ - public KeyValue.KVComparator getComparator() { + public CellComparator getComparator() { return this.comparator; } @@ -5691,6 +5692,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi protected boolean isStopRow(byte[] currentRow, int offset, short length) { return currentRow == null || (stopRow != null && + // TODO : currentRow can be tracked as cell rather than byte[] comparator.compareRows(stopRow, 0, stopRow.length, currentRow, offset, length) <= isScan); } diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java index 686df49..afd867b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java @@ -55,6 +55,7 @@ import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.Tag; import org.apache.hadoop.hbase.TagType; @@ -178,7 +179,7 @@ public class HStore implements Store { private int bytesPerChecksum; // Comparing KeyValues - private final KeyValue.KVComparator comparator; + private final CellComparator comparator; final StoreEngine storeEngine; @@ -245,7 +246,7 @@ public class HStore implements Store { scanInfo = new ScanInfo(family, ttl, timeToPurgeDeletes, this.comparator); String className = conf.get(MEMSTORE_CLASS_NAME, DefaultMemStore.class.getName()); this.memstore = ReflectionUtils.instantiateWithCustomCtor(className, new Class[] { - Configuration.class, KeyValue.KVComparator.class }, new Object[] { conf, this.comparator }); + Configuration.class, CellComparator.class }, new Object[] { conf, this.comparator }); this.offPeakHours = OffPeakHours.getInstance(conf); // Setting up cache configuration for this family @@ -723,7 +724,7 @@ public class HStore implements Store { Preconditions.checkState(firstKey != null, "First key can not be null"); byte[] lk = reader.getLastKey(); Preconditions.checkState(lk != null, "Last key can not be null"); - byte[] lastKey = KeyValue.createKeyValueFromKey(lk).getRow(); + byte[] lastKey = KeyValueUtil.createKeyValueFromKey(lk).getRow(); LOG.debug("HFile bounds: first=" + Bytes.toStringBinary(firstKey) + " last=" + 
Bytes.toStringBinary(lastKey)); @@ -752,13 +753,13 @@ public class HStore implements Store { do { Cell cell = scanner.getKeyValue(); if (prevCell != null) { - if (CellComparator.compareRows(prevCell, cell) > 0) { + if (comparator.compareRows(prevCell, cell) > 0) { throw new InvalidHFileException("Previous row is greater than" + " current row: path=" + srcPath + " previous=" + CellUtil.getCellKeyAsString(prevCell) + " current=" + CellUtil.getCellKeyAsString(cell)); } - if (CellComparator.compareFamilies(prevCell, cell) != 0) { + if (comparator.compareFamilies(prevCell, cell) != 0) { throw new InvalidHFileException("Previous key had different" + " family compared to current key: path=" + srcPath + " previous=" @@ -1850,9 +1851,9 @@ public class HStore implements Store { // TODO: Cache these keys rather than make each time? byte [] fk = r.getFirstKey(); if (fk == null) return false; - KeyValue firstKV = KeyValue.createKeyValueFromKey(fk, 0, fk.length); + KeyValue firstKV = KeyValueUtil.createKeyValueFromKey(fk, 0, fk.length); byte [] lk = r.getLastKey(); - KeyValue lastKV = KeyValue.createKeyValueFromKey(lk, 0, lk.length); + KeyValue lastKV = KeyValueUtil.createKeyValueFromKey(lk, 0, lk.length); KeyValue firstOnRow = state.getTargetKey(); if (this.comparator.compareRows(lastKV, firstOnRow) < 0) { // If last key in file is not of the target table, no candidates in this @@ -2310,7 +2311,7 @@ public class HStore implements Store { } @Override - public KeyValue.KVComparator getComparator() { + public CellComparator getComparator() { return comparator; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueHeap.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueHeap.java index beb23cf..d86e0e1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueHeap.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueHeap.java @@ -25,7 +25,7 @@ import java.util.List; 
import java.util.PriorityQueue; import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.KeyValue.KVComparator; +import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.classification.InterfaceAudience; /** @@ -66,7 +66,7 @@ public class KeyValueHeap extends NonReversedNonLazyKeyValueScanner * @param comparator */ public KeyValueHeap(List scanners, - KVComparator comparator) throws IOException { + CellComparator comparator) throws IOException { this(scanners, new KVScannerComparator(comparator)); } @@ -184,12 +184,12 @@ public class KeyValueHeap extends NonReversedNonLazyKeyValueScanner } protected static class KVScannerComparator implements Comparator { - protected KVComparator kvComparator; + protected CellComparator kvComparator; /** * Constructor * @param kvComparator */ - public KVScannerComparator(KVComparator kvComparator) { + public KVScannerComparator(CellComparator kvComparator) { this.kvComparator = kvComparator; } public int compare(KeyValueScanner left, KeyValueScanner right) { @@ -222,7 +222,7 @@ public class KeyValueHeap extends NonReversedNonLazyKeyValueScanner /** * @return KVComparator */ - public KVComparator getComparator() { + public CellComparator getComparator() { return this.kvComparator; } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReversedKeyValueHeap.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReversedKeyValueHeap.java index c7ce180..3005e37 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReversedKeyValueHeap.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReversedKeyValueHeap.java @@ -24,8 +24,8 @@ import java.util.List; import org.apache.commons.lang.NotImplementedException; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; -import 
org.apache.hadoop.hbase.KeyValue.KVComparator; /** * ReversedKeyValueHeap is used for supporting reversed scanning. Compared with @@ -43,7 +43,7 @@ public class ReversedKeyValueHeap extends KeyValueHeap { * @throws IOException */ public ReversedKeyValueHeap(List scanners, - KVComparator comparator) throws IOException { + CellComparator comparator) throws IOException { super(scanners, new ReversedKVScannerComparator(comparator)); } @@ -77,8 +77,8 @@ public class ReversedKeyValueHeap extends KeyValueHeap { KeyValueScanner scanner; while ((scanner = heap.poll()) != null) { Cell topKey = scanner.peek(); - if (comparator.getComparator().compareRows(topKey.getRowArray(), - topKey.getRowOffset(), topKey.getRowLength(), seekKey.getRowArray(), + if (comparator.getComparator().compareRows(topKey, + topKey.getRowOffset(), topKey.getRowLength(), seekKey, seekKey.getRowOffset(), seekKey.getRowLength()) < 0) { // Row of Top KeyValue is before Seek row. heap.add(scanner); @@ -162,7 +162,7 @@ public class ReversedKeyValueHeap extends KeyValueHeap { * Constructor * @param kvComparator */ - public ReversedKVScannerComparator(KVComparator kvComparator) { + public ReversedKVScannerComparator(CellComparator kvComparator) { super(kvComparator); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReversedRegionScannerImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReversedRegionScannerImpl.java index feda699..b3472ec 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReversedRegionScannerImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReversedRegionScannerImpl.java @@ -58,6 +58,8 @@ class ReversedRegionScannerImpl extends RegionScannerImpl { @Override protected boolean isStopRow(byte[] currentRow, int offset, short length) { return currentRow == null + // TODO : When BB backed cell also comes we could track the currentRow as Cell + // rather than byte[] || (super.stopRow != 
null && region.getComparator().compareRows( stopRow, 0, stopRow.length, currentRow, offset, length) >= super.isScan); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReversedStoreScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReversedStoreScanner.java index e319f90..27c4391 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReversedStoreScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReversedStoreScanner.java @@ -24,10 +24,9 @@ import java.util.NavigableSet; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.KeyValue.KVComparator; import org.apache.hadoop.hbase.client.Scan; /** @@ -63,7 +62,7 @@ class ReversedStoreScanner extends StoreScanner implements KeyValueScanner { @Override protected void resetKVHeap(List scanners, - KVComparator comparator) throws IOException { + CellComparator comparator) throws IOException { // Combine all seeked scanners with a heap heap = new ReversedKeyValueHeap(scanners, comparator); } @@ -100,7 +99,7 @@ class ReversedStoreScanner extends StoreScanner implements KeyValueScanner { @Override protected void checkScanOrder(Cell prevKV, Cell kv, - KeyValue.KVComparator comparator) throws IOException { + CellComparator comparator) throws IOException { // Check that the heap gives us KVs in an increasing order for same row and // decreasing order for different rows. 
assert prevKV == null || comparator == null || comparator.compareRows(kv, prevKV) < 0 diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanInfo.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanInfo.java index a8b314e..83a1bdd 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanInfo.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanInfo.java @@ -19,9 +19,9 @@ package org.apache.hadoop.hbase.regionserver; import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.KeepDeletedCells; -import org.apache.hadoop.hbase.KeyValue.KVComparator; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.ClassSize; @@ -36,7 +36,7 @@ public class ScanInfo { private long ttl; private KeepDeletedCells keepDeletedCells; private long timeToPurgeDeletes; - private KVComparator comparator; + private CellComparator comparator; public static final long FIXED_OVERHEAD = ClassSize.align(ClassSize.OBJECT + (2 * ClassSize.REFERENCE) + (2 * Bytes.SIZEOF_INT) @@ -50,7 +50,7 @@ public class ScanInfo { * @param comparator The store's comparator */ public ScanInfo(final HColumnDescriptor family, final long ttl, final long timeToPurgeDeletes, - final KVComparator comparator) { + final CellComparator comparator) { this(family.getName(), family.getMinVersions(), family.getMaxVersions(), ttl, family .getKeepDeletedCells(), timeToPurgeDeletes, comparator); } @@ -67,7 +67,7 @@ public class ScanInfo { */ public ScanInfo(final byte[] family, final int minVersions, final int maxVersions, final long ttl, final KeepDeletedCells keepDeletedCells, final long timeToPurgeDeletes, - final KVComparator comparator) { + final CellComparator comparator) { this.family = family; this.minVersions = minVersions; this.maxVersions = maxVersions; @@ -101,7 
+101,7 @@ public class ScanInfo { return timeToPurgeDeletes; } - public KVComparator getComparator() { + public CellComparator getComparator() { return comparator; } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanQueryMatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanQueryMatcher.java index 032b4ce..77847a1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanQueryMatcher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanQueryMatcher.java @@ -25,6 +25,7 @@ import java.util.NavigableSet; import org.apache.hadoop.hbase.KeyValue.Type; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeepDeletedCells; @@ -86,7 +87,7 @@ public class ScanQueryMatcher { private final Cell startKey; /** Row comparator for the region this query is for */ - private final KeyValue.KVComparator rowComparator; + private final CellComparator rowComparator; /* row is not private for tests */ /** Row the query is on */ @@ -278,8 +279,8 @@ public class ScanQueryMatcher { if (filter != null && filter.filterAllRemaining()) { return MatchCode.DONE_SCAN; } - int ret = this.rowComparator.compareRows(row, this.rowOffset, this.rowLength, - cell.getRowArray(), cell.getRowOffset(), cell.getRowLength()); + int ret = (-this.rowComparator.compareRows(cell.getRowArray(), cell.getRowOffset(), + cell.getRowLength(), row, this.rowOffset, this.rowLength)); if (!this.isReversed) { if (ret <= -1) { return MatchCode.DONE; @@ -495,7 +496,7 @@ public class ScanQueryMatcher { public boolean moreRowsMayExistAfter(Cell kv) { if (this.isReversed) { - if (rowComparator.compareRows(kv.getRowArray(), kv.getRowOffset(), + if (rowComparator.compareRows(kv, kv.getRowOffset(), kv.getRowLength(), 
stopRow, 0, stopRow.length) <= 0) { return false; } else { @@ -503,7 +504,7 @@ public class ScanQueryMatcher { } } if (!Bytes.equals(stopRow , HConstants.EMPTY_END_ROW) && - rowComparator.compareRows(kv.getRowArray(),kv.getRowOffset(), + rowComparator.compareRows(kv, kv.getRowOffset(), kv.getRowLength(), stopRow, 0, stopRow.length) >= 0) { // KV >= STOPROW // then NO there is nothing left. diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java index a77fc0e..6859c97 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java @@ -27,6 +27,7 @@ import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HRegionInfo; @@ -59,7 +60,7 @@ public interface Store extends HeapSize, StoreConfigInformation, PropagatingConf int NO_PRIORITY = Integer.MIN_VALUE; // General Accessors - KeyValue.KVComparator getComparator(); + CellComparator getComparator(); Collection getStorefiles(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreEngine.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreEngine.java index 519767c..2164031 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreEngine.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreEngine.java @@ -24,7 +24,7 @@ import java.util.List; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.KeyValue.KVComparator; +import 
org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext; import org.apache.hadoop.hbase.regionserver.compactions.CompactionPolicy; import org.apache.hadoop.hbase.regionserver.compactions.Compactor; @@ -97,10 +97,10 @@ public abstract class StoreEngine create( - Store store, Configuration conf, KVComparator kvComparator) throws IOException { + Store store, Configuration conf, CellComparator kvComparator) throws IOException { String className = conf.get(STORE_ENGINE_CLASS_KEY, DEFAULT_STORE_ENGINE_CLASS.getName()); try { StoreEngine se = ReflectionUtils.instantiateWithCustomCtor( diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java index c1a6b76..4511ca2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java @@ -41,7 +41,7 @@ import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HDFSBlocksDistribution; import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.KeyValue.KVComparator; +import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.client.Scan; @@ -539,7 +539,7 @@ public class StoreFile { private final CacheConfig cacheConf; private final FileSystem fs; - private KeyValue.KVComparator comparator = KeyValue.COMPARATOR; + private CellComparator comparator = KeyValue.COMPARATOR; private BloomType bloomType = BloomType.NONE; private long maxKeyCount = 0; private Path dir; @@ -586,7 +586,7 @@ public class StoreFile { return this; } - public WriterBuilder withComparator(KeyValue.KVComparator comparator) { + public WriterBuilder withComparator(CellComparator comparator) 
{ Preconditions.checkNotNull(comparator); this.comparator = comparator; return this; @@ -671,7 +671,7 @@ public class StoreFile { * @return The split point row, or null if splitting is not possible, or reader is null. */ @SuppressWarnings("deprecation") - byte[] getFileSplitPoint(KVComparator comparator) throws IOException { + byte[] getFileSplitPoint(CellComparator comparator) throws IOException { if (this.reader == null) { LOG.warn("Storefile " + this + " Reader is null; cannot get split point"); return null; @@ -681,11 +681,11 @@ public class StoreFile { // the row we want to split on as midkey. byte [] midkey = this.reader.midkey(); if (midkey != null) { - KeyValue mk = KeyValue.createKeyValueFromKey(midkey, 0, midkey.length); + KeyValue mk = KeyValueUtil.createKeyValueFromKey(midkey, 0, midkey.length); byte [] fk = this.reader.getFirstKey(); - KeyValue firstKey = KeyValue.createKeyValueFromKey(fk, 0, fk.length); + KeyValue firstKey = KeyValueUtil.createKeyValueFromKey(fk, 0, fk.length); byte [] lk = this.reader.getLastKey(); - KeyValue lastKey = KeyValue.createKeyValueFromKey(lk, 0, lk.length); + KeyValue lastKey = KeyValueUtil.createKeyValueFromKey(lk, 0, lk.length); // if the midkey is the same as the first or last keys, we cannot (ever) split this region. 
if (comparator.compareRows(mk, firstKey) == 0 || comparator.compareRows(mk, lastKey) == 0) { if (LOG.isDebugEnabled()) { @@ -708,7 +708,7 @@ public class StoreFile { private final BloomType bloomType; private byte[] lastBloomKey; private int lastBloomKeyOffset, lastBloomKeyLen; - private KVComparator kvComparator; + private CellComparator kvComparator; private Cell lastCell = null; private long earliestPutTs = HConstants.LATEST_TIMESTAMP; private Cell lastDeleteFamilyCell = null; @@ -744,7 +744,7 @@ public class StoreFile { private Writer(FileSystem fs, Path path, final Configuration conf, CacheConfig cacheConf, - final KVComparator comparator, BloomType bloomType, long maxKeys, + final CellComparator comparator, BloomType bloomType, long maxKeys, InetSocketAddress[] favoredNodes, HFileContext fileContext) throws IOException { writer = HFile.getWriterFactory(conf, cacheConf) @@ -1051,7 +1051,7 @@ public class StoreFile { this.reader = null; } - public KVComparator getComparator() { + public CellComparator getComparator() { return reader.getComparator(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java index a8ee091..d83b48c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java @@ -30,6 +30,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; @@ -369,7 +370,7 @@ public class StoreFileScanner implements KeyValueScanner { return reader; } - KeyValue.KVComparator getComparator() { + 
CellComparator getComparator() { return reader.getComparator(); } @@ -477,8 +478,8 @@ public class StoreFileScanner implements KeyValueScanner { public boolean backwardSeek(Cell key) throws IOException { seek(key); if (cur == null - || getComparator().compareRows(cur.getRowArray(), cur.getRowOffset(), - cur.getRowLength(), key.getRowArray(), key.getRowOffset(), + || getComparator().compareRows(cur, cur.getRowOffset(), + cur.getRowLength(), key, key.getRowOffset(), key.getRowLength()) > 0) { return seekToPreviousRow(key); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java index 298d5bc..7de211a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java @@ -31,11 +31,11 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.KeyValue.KVComparator; import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.client.IsolationLevel; @@ -349,7 +349,7 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner } protected void resetKVHeap(List scanners, - KVComparator comparator) throws IOException { + CellComparator comparator) throws IOException { // Combine all seeked scanners with a heap heap = new KeyValueHeap(scanners, comparator); } @@ -502,7 +502,7 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner Cell cell; // Only do a sanity-check if 
store and comparator are available. - KeyValue.KVComparator comparator = + CellComparator comparator = store != null ? store.getComparator() : null; int count = 0; @@ -752,7 +752,7 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner * @throws IOException */ protected void checkScanOrder(Cell prevKV, Cell kv, - KeyValue.KVComparator comparator) throws IOException { + CellComparator comparator) throws IOException { // Check that the heap gives us KVs in an increasing order. assert prevKV == null || comparator == null || comparator.compare(prevKV, kv) <= 0 : "Key " + prevKV diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeMultiFileWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeMultiFileWriter.java index 5b4c4db..f4d0b8b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeMultiFileWriter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeMultiFileWriter.java @@ -27,7 +27,7 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.KeyValue.KVComparator; +import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.regionserver.compactions.Compactor; import org.apache.hadoop.hbase.util.Bytes; @@ -40,7 +40,7 @@ public abstract class StripeMultiFileWriter implements Compactor.CellSink { /** Factory that is used to produce single StoreFile.Writer-s */ protected WriterFactory writerFactory; - protected KVComparator comparator; + protected CellComparator comparator; protected List existingWriters; protected List boundaries; @@ -60,7 +60,7 @@ public abstract class StripeMultiFileWriter implements Compactor.CellSink { * @param factory Factory used to produce individual file writers. * @param comparator Comparator used to compare rows. 
*/ - public void init(StoreScanner sourceScanner, WriterFactory factory, KVComparator comparator) + public void init(StoreScanner sourceScanner, WriterFactory factory, CellComparator comparator) throws IOException { this.writerFactory = factory; this.sourceScanner = sourceScanner; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreEngine.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreEngine.java index b910527..68b654e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreEngine.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreEngine.java @@ -26,8 +26,8 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.HBaseInterfaceAudience; -import org.apache.hadoop.hbase.KeyValue.KVComparator; import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext; import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest; import org.apache.hadoop.hbase.regionserver.compactions.StripeCompactionPolicy; @@ -57,7 +57,7 @@ public class StripeStoreEngine extends StoreEngine data; - final KeyValue.KVComparator comparator; + final CellComparator comparator; private Iterator iter; private Cell current; @@ -45,7 +46,7 @@ public class CollectionBackedScanner extends NonReversedNonLazyKeyValueScanner { } public CollectionBackedScanner(SortedSet set, - KeyValue.KVComparator comparator) { + CellComparator comparator) { this.comparator = comparator; data = set; init(); @@ -56,14 +57,14 @@ public class CollectionBackedScanner extends NonReversedNonLazyKeyValueScanner { } public CollectionBackedScanner(List list, - KeyValue.KVComparator comparator) { + CellComparator comparator) { Collections.sort(list, comparator); 
this.comparator = comparator; data = list; init(); } - public CollectionBackedScanner(KeyValue.KVComparator comparator, + public CollectionBackedScanner(CellComparator comparator, Cell... array) { this.comparator = comparator; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompoundBloomFilter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompoundBloomFilter.java index beda805..fb8369d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompoundBloomFilter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompoundBloomFilter.java @@ -23,8 +23,8 @@ import java.io.DataInput; import java.io.IOException; import java.nio.ByteBuffer; +import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.KeyValue.KVComparator; import org.apache.hadoop.hbase.io.hfile.BlockType; import org.apache.hadoop.hbase.io.hfile.FixedFileTrailer; import org.apache.hadoop.hbase.io.hfile.HFile; @@ -132,7 +132,7 @@ public class CompoundBloomFilter extends CompoundBloomFilterBase } @Override - public KVComparator getComparator() { + public CellComparator getComparator() { return comparator; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompoundBloomFilterBase.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompoundBloomFilterBase.java index af9fa00..b11e4bd 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompoundBloomFilterBase.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompoundBloomFilterBase.java @@ -20,8 +20,8 @@ package org.apache.hadoop.hbase.util; import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.KeyValue.KVComparator; import org.apache.hadoop.hbase.KeyValueUtil; @InterfaceAudience.Private @@ -52,7 +52,7 @@ public class 
CompoundBloomFilterBase implements BloomFilterBase { protected int hashType; /** Comparator used to compare Bloom filter keys */ - protected KVComparator comparator; + protected CellComparator comparator; @Override public long getMaxKeys() { @@ -90,7 +90,7 @@ public class CompoundBloomFilterBase implements BloomFilterBase { } @Override - public KVComparator getComparator() { + public CellComparator getComparator() { return comparator; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompoundBloomFilterWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompoundBloomFilterWriter.java index d436a98..a018cee 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompoundBloomFilterWriter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompoundBloomFilterWriter.java @@ -28,7 +28,7 @@ import java.util.Queue; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.KeyValue.KVComparator; +import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.io.hfile.BlockType; import org.apache.hadoop.hbase.io.hfile.HFileBlockIndex; import org.apache.hadoop.hbase.io.hfile.InlineBlockWriter; @@ -89,7 +89,7 @@ public class CompoundBloomFilterWriter extends CompoundBloomFilterBase */ public CompoundBloomFilterWriter(int chunkByteSizeHint, float errorRate, int hashType, int maxFold, boolean cacheOnWrite, - KVComparator comparator) { + CellComparator comparator) { chunkByteSize = ByteBloomFilter.computeFoldableByteSize( chunkByteSizeHint * 8L, maxFold); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompressionTest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompressionTest.java index cdef12f..ab5f792 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompressionTest.java +++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompressionTest.java @@ -34,6 +34,7 @@ import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseInterfaceAudience; +import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.io.compress.Compression; import org.apache.hadoop.hbase.io.hfile.AbstractHFileWriter; import org.apache.hadoop.hbase.io.hfile.CacheConfig; @@ -139,7 +140,7 @@ public class CompressionTest { scanner.seekTo(); // position to the start of file // Scanner does not do Cells yet. Do below for now till fixed. cc = scanner.getKeyValue(); - if (CellComparator.compareRows(c, cc) != 0) { + if (KeyValue.COMPARATOR.compareRows(c, cc) != 0) { throw new Exception("Read back incorrect result: " + c.toString() + " vs " + cc.toString()); } } finally { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java index 1fb64a2..46ec9e9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java @@ -84,6 +84,7 @@ import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.MasterNotRunningException; import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.RegionLocations; @@ -855,9 +856,9 @@ public class HBaseFsck extends Configured implements Closeable { CacheConfig cacheConf = new CacheConfig(getConf()); hf = HFile.createReader(fs, hfile.getPath(), cacheConf, getConf()); hf.loadFileInfo(); - KeyValue startKv = KeyValue.createKeyValueFromKey(hf.getFirstKey()); + KeyValue startKv = 
KeyValueUtil.createKeyValueFromKey(hf.getFirstKey()); start = startKv.getRow(); - KeyValue endKv = KeyValue.createKeyValueFromKey(hf.getLastKey()); + KeyValue endKv = KeyValueUtil.createKeyValueFromKey(hf.getLastKey()); end = endKv.getRow(); } catch (IOException ioe) { LOG.warn("Problem reading orphan file " + hfile + ", skipping"); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestSerialization.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestSerialization.java index c29a460..1075528 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestSerialization.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestSerialization.java @@ -69,12 +69,12 @@ public class TestSerialization { KeyValue kv = new KeyValue(row, fam, qf, ts, val); ByteArrayOutputStream baos = new ByteArrayOutputStream(); DataOutputStream dos = new DataOutputStream(baos); - long l = KeyValue.write(kv, dos); + long l = KeyValueUtil.write(kv, dos); dos.close(); byte [] mb = baos.toByteArray(); ByteArrayInputStream bais = new ByteArrayInputStream(mb); DataInputStream dis = new DataInputStream(bais); - KeyValue deserializedKv = KeyValue.create(dis); + KeyValue deserializedKv = KeyValueUtil.create(dis); assertTrue(Bytes.equals(kv.getBuffer(), deserializedKv.getBuffer())); assertEquals(kv.getOffset(), deserializedKv.getOffset()); assertEquals(kv.getLength(), deserializedKv.getLength()); @@ -104,7 +104,7 @@ public class TestSerialization { DataInputStream dis = new DataInputStream(bais); try { - KeyValue.create(dis); + KeyValueUtil.create(dis); assertTrue(kv_0.equals(kv_1)); } catch (Exception e) { fail("Unexpected Exception" + e.getMessage()); @@ -113,7 +113,7 @@ public class TestSerialization { // length -1 try { // even if we have a good kv now in dis we will just pass length with -1 for simplicity - KeyValue.create(-1, dis); + KeyValueUtil.create(-1, dis); fail("Expected corrupt stream"); } catch (Exception e) { assertEquals("Failed read -1 bytes, 
stream corrupt?", e.getMessage()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/codec/TestCellMessageCodec.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/codec/TestCellMessageCodec.java index b51de80..73fbd99 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/codec/TestCellMessageCodec.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/codec/TestCellMessageCodec.java @@ -31,6 +31,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; +import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.testclassification.MiscTests; import org.apache.hadoop.hbase.testclassification.SmallTests; @@ -107,13 +108,13 @@ public class TestCellMessageCodec { Codec.Decoder decoder = cmc.getDecoder(dis); assertTrue(decoder.advance()); Cell c = decoder.current(); - assertTrue(CellComparator.equals(c, kv1)); + assertTrue(CellUtil.equals(c, kv1)); assertTrue(decoder.advance()); c = decoder.current(); - assertTrue(CellComparator.equals(c, kv2)); + assertTrue(CellUtil.equals(c, kv2)); assertTrue(decoder.advance()); c = decoder.current(); - assertTrue(CellComparator.equals(c, kv3)); + assertTrue(CellUtil.equals(c, kv3)); assertFalse(decoder.advance()); dis.close(); assertEquals(offset, cis.getCount()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHalfStoreFileReader.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHalfStoreFileReader.java index 18595a8..4c321cb 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHalfStoreFileReader.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHalfStoreFileReader.java @@ -101,7 +101,7 @@ public class TestHalfStoreFileReader { HFile.Reader r = HFile.createReader(fs, p, cacheConf, conf); r.loadFileInfo(); byte [] midkey = r.midkey(); - KeyValue midKV = 
KeyValue.createKeyValueFromKey(midkey); + KeyValue midKV = KeyValueUtil.createKeyValueFromKey(midkey); midkey = midKV.getRow(); //System.out.println("midkey: " + midKV + " or: " + Bytes.toStringBinary(midkey)); @@ -167,7 +167,7 @@ public class TestHalfStoreFileReader { HFile.Reader r = HFile.createReader(fs, p, cacheConf, conf); r.loadFileInfo(); byte[] midkey = r.midkey(); - KeyValue midKV = KeyValue.createKeyValueFromKey(midkey); + KeyValue midKV = KeyValueUtil.createKeyValueFromKey(midkey); midkey = midKV.getRow(); Reference bottom = new Reference(midkey, Reference.Range.bottom); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestFixedFileTrailer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestFixedFileTrailer.java index 1b6731a..f62a029 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestFixedFileTrailer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestFixedFileTrailer.java @@ -18,6 +18,10 @@ */ package org.apache.hadoop.hbase.io.hfile; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; import java.io.DataInputStream; @@ -27,6 +31,12 @@ import java.util.ArrayList; import java.util.Collection; import java.util.List; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.*; import org.apache.hadoop.hbase.testclassification.IOTests; import org.apache.hadoop.hbase.testclassification.SmallTests; @@ -38,15 +48,6 @@ import org.junit.runner.RunWith; import org.junit.runners.Parameterized; import org.junit.runners.Parameterized.Parameters; -import static org.junit.Assert.*; - -import 
org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.fs.FSDataInputStream; -import org.apache.hadoop.fs.FSDataOutputStream; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; - @RunWith(Parameterized.class) @Category({IOTests.class, SmallTests.class}) public class TestFixedFileTrailer { @@ -275,7 +276,7 @@ public class TestFixedFileTrailer { assertEquals(expected.getFirstDataBlockOffset(), loaded.getFirstDataBlockOffset()); assertTrue( - expected.createComparator() instanceof KeyValue.KVComparator); + expected.createComparator() instanceof CellComparator); assertEquals(expected.getUncompressedDataIndexSize(), loaded.getUncompressedDataIndexSize()); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java index 3855629..ba86157 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java @@ -37,6 +37,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValue.Type; +import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.Tag; import org.apache.hadoop.hbase.io.compress.Compression; import org.apache.hadoop.hbase.io.hfile.HFile.Reader; @@ -251,7 +252,7 @@ public class TestHFile extends HBaseTestCase { Writer writer = HFile.getWriterFactory(conf, cacheConf) .withOutputStream(fout) .withFileContext(meta) - .withComparator(new KeyValue.KVComparator()) + .withComparator(KeyValue.COMPARATOR) .create(); LOG.info(writer); writeRecords(writer, useTags); @@ -267,18 +268,18 @@ public class TestHFile extends HBaseTestCase { // Align scanner at start of the file. 
scanner.seekTo(); readAllRecords(scanner); - int seekTo = scanner.seekTo(KeyValue.createKeyValueFromKey(getSomeKey(50))); + int seekTo = scanner.seekTo(KeyValueUtil.createKeyValueFromKey(getSomeKey(50))); System.out.println(seekTo); assertTrue("location lookup failed", - scanner.seekTo(KeyValue.createKeyValueFromKey(getSomeKey(50))) == 0); + scanner.seekTo(KeyValueUtil.createKeyValueFromKey(getSomeKey(50))) == 0); // read the key and see if it matches ByteBuffer readKey = scanner.getKey(); assertTrue("seeked key does not match", Arrays.equals(getSomeKey(50), Bytes.toBytes(readKey))); - scanner.seekTo(KeyValue.createKeyValueFromKey(getSomeKey(0))); + scanner.seekTo(KeyValueUtil.createKeyValueFromKey(getSomeKey(0))); ByteBuffer val1 = scanner.getValue(); - scanner.seekTo(KeyValue.createKeyValueFromKey(getSomeKey(0))); + scanner.seekTo(KeyValueUtil.createKeyValueFromKey(getSomeKey(0))); ByteBuffer val2 = scanner.getValue(); assertTrue(Arrays.equals(Bytes.toBytes(val1), Bytes.toBytes(val2))); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java index 939c019..9a8791d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java @@ -45,6 +45,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.fs.HFileSystem; import org.apache.hadoop.hbase.io.compress.Compression; import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; @@ -640,7 +641,7 @@ public class TestHFileBlockIndex { private void checkSeekTo(byte[][] keys, HFileScanner scanner, int i) throws IOException { assertEquals("Failed to seek to key #" + i + " (" + 
Bytes.toStringBinary(keys[i]) + ")", 0, - scanner.seekTo(KeyValue.createKeyValueFromKey(keys[i]))); + scanner.seekTo(KeyValueUtil.createKeyValueFromKey(keys[i]))); } private void assertArrayEqualsBuffer(String msgPrefix, byte[] arr, diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileSeek.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileSeek.java index 76a8200..4462477 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileSeek.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileSeek.java @@ -42,6 +42,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.RawLocalFileSystem; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.io.hfile.HFile.Reader; import org.apache.hadoop.hbase.io.hfile.HFile.Writer; import org.apache.hadoop.hbase.testclassification.IOTests; @@ -195,7 +196,7 @@ public class TestHFileSeek extends TestCase { kSampler.next(key); byte [] k = new byte [key.getLength()]; System.arraycopy(key.getBytes(), 0, k, 0, key.getLength()); - if (scanner.seekTo(KeyValue.createKeyValueFromKey(k)) >= 0) { + if (scanner.seekTo(KeyValueUtil.createKeyValueFromKey(k)) >= 0) { ByteBuffer bbkey = scanner.getKey(); ByteBuffer bbval = scanner.getValue(); totalBytes += bbkey.limit(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV2.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV2.java index 42e918a..03128ab 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV2.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV2.java @@ -38,9 +38,9 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FileSystem; import 
org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.KeyValue.KVComparator; import org.apache.hadoop.hbase.io.compress.Compression; import org.apache.hadoop.hbase.io.compress.Compression.Algorithm; import org.apache.hadoop.hbase.io.hfile.HFile.FileInfo; @@ -148,7 +148,7 @@ public class TestHFileWriterV2 { HFileBlock.FSReader blockReader = new HFileBlock.FSReaderImpl(fsdis, fileSize, meta); // Comparator class name is stored in the trailer in version 2. - KVComparator comparator = trailer.createComparator(); + CellComparator comparator = trailer.createComparator(); HFileBlockIndex.BlockIndexReader dataBlockIndexReader = new HFileBlockIndex.BlockIndexReader(comparator, trailer.getNumDataIndexLevels()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV3.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV3.java index f96e8ef..2efa55e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV3.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV3.java @@ -38,10 +38,10 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.KeyValue.KVComparator; import org.apache.hadoop.hbase.Tag; import org.apache.hadoop.hbase.io.compress.Compression; import org.apache.hadoop.hbase.io.compress.Compression.Algorithm; @@ -178,7 +178,7 @@ public class TestHFileWriterV3 { HFileBlock.FSReader blockReader = new HFileBlock.FSReaderImpl(fsdis, fileSize, meta); // Comparator class name is 
stored in the trailer in version 2. - KVComparator comparator = trailer.createComparator(); + CellComparator comparator = trailer.createComparator(); HFileBlockIndex.BlockIndexReader dataBlockIndexReader = new HFileBlockIndex.BlockIndexReader(comparator, trailer.getNumDataIndexLevels()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/KeyValueScanFixture.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/KeyValueScanFixture.java index 53c55be..5fd5f4a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/KeyValueScanFixture.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/KeyValueScanFixture.java @@ -19,13 +19,13 @@ package org.apache.hadoop.hbase.regionserver; -import org.apache.hadoop.hbase.regionserver.KeyValueScanner; -import org.apache.hadoop.hbase.util.CollectionBackedScanner; -import org.apache.hadoop.hbase.KeyValue; - import java.util.ArrayList; import java.util.List; +import org.apache.hadoop.hbase.CellComparator; +import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.util.CollectionBackedScanner; + /** * A fixture that implements and presents a KeyValueScanner. * It takes a list of key/values which is then sorted according @@ -33,7 +33,7 @@ import java.util.List; * to be a store file scanner. */ public class KeyValueScanFixture extends CollectionBackedScanner { - public KeyValueScanFixture(KeyValue.KVComparator comparator, + public KeyValueScanFixture(CellComparator comparator, KeyValue... 
incData) { super(comparator, incData); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MockStoreFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MockStoreFile.java index 3a12674..f99226f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MockStoreFile.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MockStoreFile.java @@ -23,8 +23,8 @@ import java.util.Map; import java.util.TreeMap; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.HBaseTestingUtility; -import org.apache.hadoop.hbase.KeyValue.KVComparator; import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.util.Bytes; @@ -54,7 +54,7 @@ public class MockStoreFile extends StoreFile { } @Override - byte[] getFileSplitPoint(KVComparator comparator) throws IOException { + byte[] getFileSplitPoint(CellComparator comparator) throws IOException { return this.splitPoint; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompoundBloomFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompoundBloomFilter.java index d7b4a04..2557b48 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompoundBloomFilter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompoundBloomFilter.java @@ -38,6 +38,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.client.Scan; @@ -354,8 +355,8 @@ public class TestCompoundBloomFilter { row, 0, 0); byte[] rowColKey = cbfb.createBloomKey(row, 0, row.length, 
qualifier, 0, qualifier.length); - KeyValue rowKV = KeyValue.createKeyValueFromKey(rowKey); - KeyValue rowColKV = KeyValue.createKeyValueFromKey(rowColKey); + KeyValue rowKV = KeyValueUtil.createKeyValueFromKey(rowKey); + KeyValue rowColKV = KeyValueUtil.createKeyValueFromKey(rowColKey); assertEquals(rowKV.getTimestamp(), rowColKV.getTimestamp()); assertEquals(Bytes.toStringBinary(rowKV.getRow()), Bytes.toStringBinary(rowColKV.getRow())); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java index e1e5b89..11e0760 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java @@ -534,7 +534,7 @@ public class TestDefaultMemStore extends TestCase { byte[] row1 = Bytes.toBytes(rowId); assertTrue( "Row name", - KeyValue.COMPARATOR.compareRows(left.getRowArray(), left.getRowOffset(), + KeyValue.COMPARATOR.compareRows(left, left.getRowOffset(), (int) left.getRowLength(), row1, 0, row1.length) == 0); assertEquals("Count of columns", QUALIFIER_COUNT, results.size()); List row = new ArrayList(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultStoreEngine.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultStoreEngine.java index c185075..2ce77f2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultStoreEngine.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultStoreEngine.java @@ -20,8 +20,8 @@ package org.apache.hadoop.hbase.regionserver; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.HBaseConfiguration; -import org.apache.hadoop.hbase.KeyValue.KVComparator; import 
org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.regionserver.compactions.RatioBasedCompactionPolicy; @@ -60,7 +60,7 @@ public class TestDefaultStoreEngine { conf.set(DefaultStoreEngine.DEFAULT_STORE_FLUSHER_CLASS_KEY, DummyStoreFlusher.class.getName()); Store mockStore = Mockito.mock(Store.class); - StoreEngine se = StoreEngine.create(mockStore, conf, new KVComparator()); + StoreEngine se = StoreEngine.create(mockStore, conf, new CellComparator()); Assert.assertTrue(se instanceof DefaultStoreEngine); Assert.assertTrue(se.getCompactionPolicy() instanceof DummyCompactionPolicy); Assert.assertTrue(se.getStoreFlusher() instanceof DummyStoreFlusher); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java index 416ee28..f6cb376 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java @@ -172,9 +172,9 @@ public class TestGetClosestAtOrBefore extends HBaseTestCase { } private byte [] extractRowFromMetaRow(final byte [] b) { - int firstDelimiter = KeyValue.getDelimiter(b, 0, b.length, + int firstDelimiter = CellUtil.getDelimiter(b, 0, b.length, HConstants.DELIMITER); - int lastDelimiter = KeyValue.getDelimiterInReverse(b, 0, b.length, + int lastDelimiter = CellUtil.getDelimiterInReverse(b, 0, b.length, HConstants.DELIMITER); int length = lastDelimiter - firstDelimiter - 1; byte [] row = new byte[length]; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java index 6a5e844..e82f7b2 100644 --- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java @@ -2701,7 +2701,7 @@ public class TestHRegion { res = new ArrayList(); is.next(res); for (int i = 0; i < res.size(); i++) { - assertTrue(CellComparator.equalsIgnoreMvccVersion(expected1.get(i), res.get(i))); + assertTrue(CellUtil.equalsIgnoreMvccVersion(expected1.get(i), res.get(i))); } // Result 2 @@ -2712,7 +2712,7 @@ public class TestHRegion { res = new ArrayList(); is.next(res); for (int i = 0; i < res.size(); i++) { - assertTrue(CellComparator.equalsIgnoreMvccVersion(expected2.get(i), res.get(i))); + assertTrue(CellUtil.equalsIgnoreMvccVersion(expected2.get(i), res.get(i))); } } finally { HBaseTestingUtility.closeRegionAndWAL(this.region); @@ -2834,7 +2834,7 @@ public class TestHRegion { // Verify result for (int i = 0; i < expected.size(); i++) { - assertTrue(CellComparator.equalsIgnoreMvccVersion(expected.get(i), actual.get(i))); + assertTrue(CellUtil.equalsIgnoreMvccVersion(expected.get(i), actual.get(i))); } } finally { HBaseTestingUtility.closeRegionAndWAL(this.region); @@ -2916,7 +2916,7 @@ public class TestHRegion { // Verify result for (int i = 0; i < expected.size(); i++) { - assertTrue(CellComparator.equalsIgnoreMvccVersion(expected.get(i), actual.get(i))); + assertTrue(CellUtil.equalsIgnoreMvccVersion(expected.get(i), actual.get(i))); } } finally { HBaseTestingUtility.closeRegionAndWAL(this.region); @@ -3038,7 +3038,7 @@ public class TestHRegion { // Verify result for (int i = 0; i < expected.size(); i++) { - assertTrue(CellComparator.equalsIgnoreMvccVersion(expected.get(i), actual.get(i))); + assertTrue(CellUtil.equalsIgnoreMvccVersion(expected.get(i), actual.get(i))); } } finally { HBaseTestingUtility.closeRegionAndWAL(this.region); @@ -3165,7 +3165,7 @@ public class TestHRegion { // Verify result for (int i = 0; i < expected.size(); i++) { - 
assertTrue(CellComparator.equalsIgnoreMvccVersion(expected.get(i), actual.get(i))); + assertTrue(CellUtil.equalsIgnoreMvccVersion(expected.get(i), actual.get(i))); } } finally { HBaseTestingUtility.closeRegionAndWAL(this.region); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionOnCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionOnCluster.java index 9b8dabf..1dc20fc 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionOnCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionOnCluster.java @@ -53,7 +53,7 @@ import org.mortbay.log.Log; public class TestHRegionOnCluster { private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); - @Test (timeout=300000) + @Test (timeout=3000000) public void testDataCorrectnessReplayingRecoveredEdits() throws Exception { final int NUM_MASTERS = 1; final int NUM_RS = 3; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMultiColumnScanner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMultiColumnScanner.java index 28d3ab9..7964427 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMultiColumnScanner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMultiColumnScanner.java @@ -38,7 +38,6 @@ import org.apache.commons.lang.ArrayUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HColumnDescriptor; @@ -267,7 +266,7 @@ public class TestMultiColumnScanner { assertTrue("Scanner returned additional key/value: " + kv + ", " + queryInfo + deleteInfo + ";", kvPos < kvs.size()); assertTrue("Scanner returned wrong 
key/value; " + queryInfo - + deleteInfo + ";", CellComparator.equalsIgnoreMvccVersion(kvs.get(kvPos), (kv))); + + deleteInfo + ";", CellUtil.equalsIgnoreMvccVersion(kvs.get(kvPos), (kv))); ++kvPos; ++numResults; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestQueryMatcher.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestQueryMatcher.java index 2df2f5a..515ee73 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestQueryMatcher.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestQueryMatcher.java @@ -27,11 +27,11 @@ import java.util.ArrayList; import java.util.List; import java.util.NavigableSet; +import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.HBaseTestCase; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeepDeletedCells; import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.KeyValue.KVComparator; import org.apache.hadoop.hbase.KeyValue.Type; import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.testclassification.SmallTests; @@ -64,7 +64,7 @@ public class TestQueryMatcher extends HBaseTestCase { private Get get; long ttl = Long.MAX_VALUE; - KVComparator rowComparator; + CellComparator rowComparator; private Scan scan; @Before diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRecoveredEdits.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRecoveredEdits.java index 3d651ef..363dc38 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRecoveredEdits.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRecoveredEdits.java @@ -35,6 +35,7 @@ import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; 
+import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.Result; @@ -154,14 +155,15 @@ public class TestRecoveredEdits { Cell previous = null; for (Cell cell: val.getCells()) { if (CellUtil.matchingFamily(cell, WALEdit.METAFAMILY)) continue; - if (previous != null && CellComparator.compareRows(previous, cell) == 0) continue; + if (previous != null && KeyValue.COMPARATOR.compareRows(previous, cell) == 0) + continue; previous = cell; Get g = new Get(CellUtil.cloneRow(cell)); Result r = region.get(g); boolean found = false; for (CellScanner scanner = r.cellScanner(); scanner.advance();) { Cell current = scanner.current(); - if (CellComparator.compare(cell, current, true) == 0) { + if (KeyValue.COMPARATOR.compare(cell, current, true) == 0) { found = true; break; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java index 59793e0..d5a9229 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java @@ -19,7 +19,9 @@ package org.apache.hadoop.hbase.regionserver; -import static org.junit.Assert.*; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; import static org.mockito.Matchers.any; import static org.mockito.Mockito.spy; import static org.mockito.Mockito.times; @@ -48,6 +50,7 @@ import org.apache.hadoop.fs.LocalFileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseTestingUtility; @@ -55,7 +58,6 
@@ import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.KeyValue.KVComparator; import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.RegionServerTests; @@ -986,7 +988,7 @@ public class TestStore { public static DefaultCompactor lastCreatedCompactor = null; @Override protected void createComponents( - Configuration conf, Store store, KVComparator comparator) throws IOException { + Configuration conf, Store store, CellComparator comparator) throws IOException { super.createComponents(conf, store, comparator); lastCreatedCompactor = this.compactor; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java index e5a5022..be8d999 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java @@ -170,9 +170,9 @@ public class TestStoreFile extends HBaseTestCase { // Split on a row, not in middle of row. Midkey returned by reader // may be in middle of row. Create new one with empty column and // timestamp. 
- KeyValue kv = KeyValue.createKeyValueFromKey(reader.midkey()); + KeyValue kv = KeyValueUtil.createKeyValueFromKey(reader.midkey()); byte [] midRow = kv.getRow(); - kv = KeyValue.createKeyValueFromKey(reader.getLastKey()); + kv = KeyValueUtil.createKeyValueFromKey(reader.getLastKey()); byte [] finalRow = kv.getRow(); // Make a reference HRegionInfo splitHri = new HRegionInfo(hri.getTable(), null, midRow); @@ -184,7 +184,7 @@ public class TestStoreFile extends HBaseTestCase { HFileScanner s = refHsf.createReader().getScanner(false, false); for(boolean first = true; (!s.isSeeked() && s.seekTo()) || s.next();) { ByteBuffer bb = s.getKey(); - kv = KeyValue.createKeyValueFromKey(bb); + kv = KeyValueUtil.createKeyValueFromKey(bb); if (first) { assertTrue(Bytes.equals(kv.getRow(), midRow)); first = false; @@ -315,7 +315,7 @@ public class TestStoreFile extends HBaseTestCase { private void checkHalfHFile(final HRegionFileSystem regionFs, final StoreFile f) throws IOException { byte [] midkey = f.createReader().midkey(); - KeyValue midKV = KeyValue.createKeyValueFromKey(midkey); + KeyValue midKV = KeyValueUtil.createKeyValueFromKey(midkey); byte [] midRow = midKV.getRow(); // Create top split. 
HRegionInfo topHri = new HRegionInfo(regionFs.getRegionInfo().getTable(), @@ -398,7 +398,7 @@ public class TestStoreFile extends HBaseTestCase { key.arrayOffset(), key.limit(), badmidkey, 0, badmidkey.length) >= 0); if (first) { first = false; - KeyValue keyKV = KeyValue.createKeyValueFromKey(key); + KeyValue keyKV = KeyValueUtil.createKeyValueFromKey(key); LOG.info("First top when key < bottom: " + keyKV); String tmp = Bytes.toString(keyKV.getRow()); for (int i = 0; i < tmp.length(); i++) { @@ -406,7 +406,7 @@ public class TestStoreFile extends HBaseTestCase { } } } - KeyValue keyKV = KeyValue.createKeyValueFromKey(key); + KeyValue keyKV = KeyValueUtil.createKeyValueFromKey(key); LOG.info("Last top when key < bottom: " + keyKV); String tmp = Bytes.toString(keyKV.getRow()); for (int i = 0; i < tmp.length(); i++) { @@ -430,7 +430,7 @@ public class TestStoreFile extends HBaseTestCase { key = bottomScanner.getKey(); if (first) { first = false; - keyKV = KeyValue.createKeyValueFromKey(key); + keyKV = KeyValueUtil.createKeyValueFromKey(key); LOG.info("First bottom when key > top: " + keyKV); tmp = Bytes.toString(keyKV.getRow()); for (int i = 0; i < tmp.length(); i++) { @@ -438,7 +438,7 @@ public class TestStoreFile extends HBaseTestCase { } } } - keyKV = KeyValue.createKeyValueFromKey(key); + keyKV = KeyValueUtil.createKeyValueFromKey(key); LOG.info("Last bottom when key > top: " + keyKV); for (int i = 0; i < tmp.length(); i++) { assertTrue(Bytes.toString(keyKV.getRow()).charAt(i) == 'z'); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStripeCompactor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStripeCompactor.java index 06bbd54..b7ba7ec 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStripeCompactor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStripeCompactor.java @@ -42,11 +42,11 @@ import org.apache.hadoop.conf.Configuration; import 
org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.KeyValue.KVComparator; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.io.compress.Compression; import org.apache.hadoop.hbase.io.hfile.HFile; @@ -188,7 +188,7 @@ public class TestStripeCompactor { // Create store mock that is satisfactory for compactor. HColumnDescriptor col = new HColumnDescriptor(NAME_OF_THINGS); - ScanInfo si = new ScanInfo(col, Long.MAX_VALUE, 0, new KVComparator()); + ScanInfo si = new ScanInfo(col, Long.MAX_VALUE, 0, new CellComparator()); Store store = mock(Store.class); when(store.getFamily()).thenReturn(col); when(store.getScanInfo()).thenReturn(si); @@ -197,7 +197,7 @@ public class TestStripeCompactor { when(store.getRegionInfo()).thenReturn(new HRegionInfo(TABLE_NAME)); when(store.createWriterInTmp(anyLong(), any(Compression.Algorithm.class), anyBoolean(), anyBoolean(), anyBoolean())).thenAnswer(writers); - when(store.getComparator()).thenReturn(new KVComparator()); + when(store.getComparator()).thenReturn(new CellComparator()); return new StripeCompactor(conf, store) { @Override diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStripeStoreEngine.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStripeStoreEngine.java index 32ab164..d9e3ea3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStripeStoreEngine.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStripeStoreEngine.java @@ -33,8 +33,8 @@ import java.util.Arrays; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; +import 
org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.HBaseConfiguration; -import org.apache.hadoop.hbase.KeyValue.KVComparator; import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext; import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest; import org.apache.hadoop.hbase.regionserver.compactions.NoLimitCompactionThroughputController; @@ -111,7 +111,7 @@ public class TestStripeStoreEngine { private static TestStoreEngine createEngine(Configuration conf) throws Exception { Store store = mock(Store.class); - KVComparator kvComparator = mock(KVComparator.class); + CellComparator kvComparator = mock(CellComparator.class); return (TestStoreEngine)StoreEngine.create(store, conf, kvComparator); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStripeStoreFileManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStripeStoreFileManager.java index 48f93e0..fa60ab1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStripeStoreFileManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStripeStoreFileManager.java @@ -36,11 +36,11 @@ import java.util.List; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.KeyValue.KVComparator; import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.Bytes; @@ -590,7 +590,7 @@ public class TestStripeStoreFileManager { ArrayList sfs, Configuration conf) throws Exception { StripeStoreConfig config = new StripeStoreConfig( conf, 
Mockito.mock(StoreConfigInformation.class)); - StripeStoreFileManager result = new StripeStoreFileManager(new KVComparator(), conf, config); + StripeStoreFileManager result = new StripeStoreFileManager(new CellComparator(), conf, config); result.loadFiles(sfs); return result; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestStripeCompactionPolicy.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestStripeCompactionPolicy.java index 3294f6d..116a5dd 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestStripeCompactionPolicy.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestStripeCompactionPolicy.java @@ -563,7 +563,7 @@ public class TestStripeCompactionPolicy { StoreFileWritersCapture writers = new StoreFileWritersCapture(); StripeStoreFlusher.StripeFlushRequest req = policy.selectFlush(si, input.length); StripeMultiFileWriter mw = req.createWriter(); - mw.init(null, writers, new KeyValue.KVComparator()); + mw.init(null, writers, KeyValue.COMPARATOR); for (KeyValue kv : input) { mw.append(kv); }