.../java/org/apache/hadoop/hbase/HRegionInfo.java | 5 +- .../org/apache/hadoop/hbase/client/Result.java | 11 +- .../org/apache/hadoop/hbase/filter/FilterList.java | 6 +- .../apache/hadoop/hbase/filter/ParseFilter.java | 6 +- .../hadoop/hbase/client/TestClientNoCluster.java | 4 +- .../apache/hadoop/hbase/client/TestOperation.java | 9 +- .../org/apache/hadoop/hbase/CellComparator.java | 716 ++++++++------- .../java/org/apache/hadoop/hbase/CellUtil.java | 308 ++++++- .../java/org/apache/hadoop/hbase/KeyValue.java | 961 +-------------------- .../org/apache/hadoop/hbase/KeyValueTestUtil.java | 2 +- .../java/org/apache/hadoop/hbase/KeyValueUtil.java | 123 ++- .../java/org/apache/hadoop/hbase/TableName.java | 7 +- .../apache/hadoop/hbase/codec/KeyValueCodec.java | 3 +- .../hadoop/hbase/codec/KeyValueCodecWithTags.java | 3 +- .../io/encoding/BufferedDataBlockEncoder.java | 50 +- .../hbase/io/encoding/CopyKeyDataBlockEncoder.java | 4 +- .../hadoop/hbase/io/encoding/DataBlockEncoder.java | 10 +- .../hbase/io/encoding/DiffKeyDeltaEncoder.java | 4 +- .../hbase/io/encoding/FastDiffDeltaEncoder.java | 4 +- .../hbase/io/encoding/PrefixKeyDeltaEncoder.java | 4 +- .../java/org/apache/hadoop/hbase/util/Bytes.java | 101 ++- .../hbase/util/test/RedundantKVGenerator.java | 3 +- .../apache/hadoop/hbase/TestCellComparator.java | 89 +- .../java/org/apache/hadoop/hbase/TestKeyValue.java | 149 ++-- .../apache/hadoop/hbase/codec/TestCellCodec.java | 7 +- .../hadoop/hbase/codec/TestCellCodecWithTags.java | 8 +- .../hbase/codec/TestKeyValueCodecWithTags.java | 8 +- .../org/apache/hadoop/hbase/util/TestBytes.java | 28 +- .../org/apache/hadoop/hbase/types/TestPBCell.java | 3 +- .../hbase/mapreduce/IntegrationTestImportTsv.java | 5 +- .../hbase/codec/prefixtree/PrefixTreeCodec.java | 13 +- .../hbase/codec/prefixtree/PrefixTreeSeeker.java | 12 +- .../prefixtree/decode/PrefixTreeArrayScanner.java | 4 +- .../codec/prefixtree/decode/PrefixTreeCell.java | 8 +- .../codec/prefixtree/row/BaseTestRowData.java | 4 +- .../prefixtree/row/TestPrefixTreeSearcher.java | 6 +- .../row/data/TestRowDataSearcherRowMiss.java | 14 +- .../prefixtree/row/data/TestRowDataSimple.java | 14 +- .../hadoop/hbase/io/HalfStoreFileReader.java | 10 +- .../hadoop/hbase/io/hfile/FixedFileTrailer.java | 47 +- .../org/apache/hadoop/hbase/io/hfile/HFile.java | 14 +- .../hadoop/hbase/io/hfile/HFileBlockIndex.java | 28 +- .../hadoop/hbase/io/hfile/HFilePrettyPrinter.java | 8 +- .../hadoop/hbase/io/hfile/HFileReaderImpl.java | 28 +- .../hadoop/hbase/io/hfile/HFileWriterFactory.java | 4 +- .../hadoop/hbase/io/hfile/HFileWriterImpl.java | 10 +- .../hadoop/hbase/mapreduce/HFileOutputFormat2.java | 3 +- .../hbase/mapreduce/KeyValueSerialization.java | 5 +- .../hbase/mapreduce/KeyValueSortReducer.java | 3 +- .../hadoop/hbase/mapreduce/PutSortReducer.java | 3 +- .../mapreduce/SimpleTotalOrderPartitioner.java | 2 +- .../hadoop/hbase/mapreduce/TextSortReducer.java | 3 +- .../hadoop/hbase/regionserver/CellSkipListSet.java | 4 +- .../hadoop/hbase/regionserver/DefaultMemStore.java | 7 +- .../hbase/regionserver/DefaultStoreEngine.java | 4 +- .../regionserver/DefaultStoreFileManager.java | 6 +- .../regionserver/GetClosestRowBeforeTracker.java | 17 +- .../apache/hadoop/hbase/regionserver/HRegion.java | 6 +- .../hbase/regionserver/HRegionFileSystem.java | 6 +- .../apache/hadoop/hbase/regionserver/HStore.java | 15 +- .../hadoop/hbase/regionserver/KeyValueHeap.java | 10 +- .../hbase/regionserver/ReversedKeyValueHeap.java | 10 +- .../regionserver/ReversedRegionScannerImpl.java | 
2 + .../hbase/regionserver/ReversedStoreScanner.java | 9 +- .../apache/hadoop/hbase/regionserver/ScanInfo.java | 10 +- .../hbase/regionserver/ScanQueryMatcher.java | 40 +- .../apache/hadoop/hbase/regionserver/Store.java | 4 +- .../hadoop/hbase/regionserver/StoreEngine.java | 8 +- .../hadoop/hbase/regionserver/StoreFile.java | 94 +- .../hbase/regionserver/StoreFileScanner.java | 11 +- .../hadoop/hbase/regionserver/StoreScanner.java | 8 +- .../hbase/regionserver/StripeMultiFileWriter.java | 41 +- .../hbase/regionserver/StripeStoreEngine.java | 4 +- .../hbase/regionserver/StripeStoreFileManager.java | 17 +- .../hbase/regionserver/wal/WALCellCodec.java | 3 +- .../hadoop/hbase/regionserver/wal/WALEdit.java | 6 +- .../org/apache/hadoop/hbase/util/BloomFilter.java | 3 +- .../apache/hadoop/hbase/util/BloomFilterBase.java | 7 - .../hadoop/hbase/util/BloomFilterFactory.java | 6 +- .../apache/hadoop/hbase/util/ByteBloomFilter.java | 16 +- .../hadoop/hbase/util/CollectionBackedScanner.java | 14 +- .../hadoop/hbase/util/CompoundBloomFilter.java | 10 +- .../hadoop/hbase/util/CompoundBloomFilterBase.java | 13 +- .../hbase/util/CompoundBloomFilterWriter.java | 4 +- .../apache/hadoop/hbase/util/CompressionTest.java | 2 +- .../org/apache/hadoop/hbase/util/HBaseFsck.java | 5 +- .../apache/hadoop/hbase/HBaseTestingUtility.java | 2 +- .../org/apache/hadoop/hbase/MetaMockingUtil.java | 2 +- .../org/apache/hadoop/hbase/TestSerialization.java | 8 +- .../org/apache/hadoop/hbase/client/TestResult.java | 17 +- .../hadoop/hbase/codec/TestCellMessageCodec.java | 7 +- .../hbase/filter/TestDependentColumnFilter.java | 3 +- .../org/apache/hadoop/hbase/filter/TestFilter.java | 9 +- .../apache/hadoop/hbase/filter/TestFilterList.java | 15 +- .../filter/TestSingleColumnValueExcludeFilter.java | 5 +- .../hadoop/hbase/io/TestHalfStoreFileReader.java | 7 +- .../hbase/io/encoding/TestDataBlockEncoders.java | 5 +- .../hbase/io/encoding/TestPrefixTreeEncoding.java | 15 +- .../io/encoding/TestSeekToBlockWithEncoders.java | 3 +- .../hadoop/hbase/io/hfile/TestCacheOnWrite.java | 3 +- .../hbase/io/hfile/TestFixedFileTrailer.java | 25 +- .../apache/hadoop/hbase/io/hfile/TestHFile.java | 12 +- .../hadoop/hbase/io/hfile/TestHFileBlock.java | 3 +- .../hadoop/hbase/io/hfile/TestHFileBlockIndex.java | 24 +- .../hadoop/hbase/io/hfile/TestHFileSeek.java | 3 +- .../hadoop/hbase/io/hfile/TestHFileWriterV2.java | 6 +- .../hadoop/hbase/io/hfile/TestHFileWriterV3.java | 8 +- .../apache/hadoop/hbase/io/hfile/TestPrefetch.java | 3 +- .../apache/hadoop/hbase/io/hfile/TestReseekTo.java | 3 +- .../apache/hadoop/hbase/io/hfile/TestSeekTo.java | 3 +- .../hbase/regionserver/KeyValueScanFixture.java | 12 +- .../hadoop/hbase/regionserver/MockStoreFile.java | 4 +- .../hbase/regionserver/TestCellSkipListSet.java | 3 +- .../regionserver/TestCompoundBloomFilter.java | 13 +- .../hbase/regionserver/TestDefaultMemStore.java | 14 +- .../hbase/regionserver/TestDefaultStoreEngine.java | 4 +- .../regionserver/TestGetClosestAtOrBefore.java | 4 +- .../hadoop/hbase/regionserver/TestHRegion.java | 12 +- .../hbase/regionserver/TestKeyValueHeap.java | 9 +- .../regionserver/TestKeyValueScanFixture.java | 3 +- .../hbase/regionserver/TestMultiColumnScanner.java | 4 +- .../hbase/regionserver/TestQueryMatcher.java | 6 +- .../hbase/regionserver/TestRecoveredEdits.java | 6 +- .../hbase/regionserver/TestReversibleScanners.java | 5 +- .../hbase/regionserver/TestSeekOptimizations.java | 5 +- .../hadoop/hbase/regionserver/TestStore.java | 12 +- 
.../hadoop/hbase/regionserver/TestStoreFile.java | 26 +- .../hbase/regionserver/TestStoreScanner.java | 13 +- .../hbase/regionserver/TestStripeCompactor.java | 6 +- .../hbase/regionserver/TestStripeStoreEngine.java | 4 +- .../regionserver/TestStripeStoreFileManager.java | 4 +- .../compactions/TestStripeCompactionPolicy.java | 3 +- .../TestReplicationWALEntryFilters.java | 3 +- .../hadoop/hbase/util/TestByteBloomFilter.java | 2 +- 134 files changed, 1704 insertions(+), 1970 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java index 7a43a50..66373c8 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java @@ -27,7 +27,6 @@ import java.util.List; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.KeyValue.KVComparator; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.hbase.client.RegionReplicaUtil; @@ -871,9 +870,9 @@ public class HRegionInfo implements Comparable<HRegionInfo> { /** * @return Comparator to use comparing {@link KeyValue}s. */ - public KVComparator getComparator() { + public CellComparator getComparator() { return isMetaRegion()? - KeyValue.META_COMPARATOR: KeyValue.COMPARATOR; + CellComparator.META_COMPARATOR: CellComparator.COMPARATOR; } /** diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java index 5a9aff3..4c58da5 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java @@ -31,6 +31,7 @@ import java.util.NavigableMap; import java.util.TreeMap; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellScannable; import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.CellUtil; @@ -204,14 +205,14 @@ public class Result implements CellScannable, CellScanner { * Return the array of Cells backing this Result instance. * * The array is sorted from smallest -> largest using the - * {@link KeyValue#COMPARATOR}. + * {@link CellComparator#COMPARATOR}. * * The array only contains what your Get or Scan specifies and no more. * For example if you request column "A" 1 version you will have at most 1 * Cell in the array. If you request column "A" with 2 versions you will * have at most 2 Cells, with the first one being the newer timestamp and * the second being the older timestamp (this is the sort order defined by - * {@link KeyValue#COMPARATOR}). If columns don't exist, they won't be + * {@link CellComparator#COMPARATOR}). If columns don't exist, they won't be * present in the result. Therefore if you ask for 1 version all columns, * it is safe to iterate over this array and expect to see 1 Cell for * each column and no more. @@ -237,7 +238,7 @@ /** * Return the Cells for the specific column. The Cells are sorted in - * the {@link KeyValue#COMPARATOR} order. That implies the first entry in + * the {@link CellComparator#COMPARATOR} order. That implies the first entry in
If the query (Scan or Get) only * requested 1 version the list will contain at most 1 entry. If the column * did not exist in the result set (either the column does not exist @@ -282,7 +283,7 @@ public class Result implements CellScannable, CellScanner { family, qualifier); // pos === ( -(insertion point) - 1) - int pos = Arrays.binarySearch(kvs, searchTerm, KeyValue.COMPARATOR); + int pos = Arrays.binarySearch(kvs, searchTerm, CellComparator.COMPARATOR); // never will exact match if (pos < 0) { pos = (pos+1) * -1; @@ -327,7 +328,7 @@ public class Result implements CellScannable, CellScanner { qualifier, qoffset, qlength); // pos === ( -(insertion point) - 1) - int pos = Arrays.binarySearch(kvs, searchTerm, KeyValue.COMPARATOR); + int pos = Arrays.binarySearch(kvs, searchTerm, CellComparator.COMPARATOR); // never will exact match if (pos < 0) { pos = (pos+1) * -1; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterList.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterList.java index ba1a818..a7856cd 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterList.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterList.java @@ -25,7 +25,7 @@ import java.util.List; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; -import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.hbase.exceptions.DeserializationException; @@ -216,7 +216,7 @@ final public class FilterList extends Filter { @Override public Cell transformCell(Cell c) throws IOException { - if (!CellComparator.equals(c, referenceCell)) { + if (!CellUtil.equals(c, referenceCell)) { throw new IllegalStateException("Reference Cell: " + this.referenceCell + " does not match: " + c); } @@ -415,7 +415,7 @@ final public class FilterList extends Filter { keyHint = curKeyHint; continue; } - if (KeyValue.COMPARATOR.compare(keyHint, curKeyHint) > 0) { + if (CellComparator.COMPARATOR.compare(keyHint, curKeyHint) > 0) { keyHint = curKeyHint; } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ParseFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ParseFilter.java index 8101f4a..5089308 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ParseFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ParseFilter.java @@ -32,7 +32,6 @@ import java.util.Stack; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp; @@ -294,7 +293,7 @@ public class ParseFilter { * @return an ArrayList containing the arguments of the filter in the filter string */ public static ArrayList getFilterArguments (byte [] filterStringAsByteArray) { - int argumentListStartIndex = KeyValue.getDelimiter(filterStringAsByteArray, 0, + int argumentListStartIndex = Bytes.searchDelimiterIndex(filterStringAsByteArray, 0, filterStringAsByteArray.length, ParseConstants.LPAREN); if (argumentListStartIndex == -1) { @@ -818,7 +817,8 @@ public class ParseFilter { * @return the parsed arguments of the comparator as a 2D byte array */ public static byte 
[][] parseComparator (byte [] comparator) { - final int index = KeyValue.getDelimiter(comparator, 0, comparator.length, ParseConstants.COLON); + final int index = Bytes.searchDelimiterIndex(comparator, 0, comparator.length, + ParseConstants.COLON); if (index == -1) { throw new IllegalArgumentException("Incorrect comparator"); } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java index f085ace..29b32b3 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java @@ -32,12 +32,13 @@ import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; import org.apache.commons.lang.NotImplementedException; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; +import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; @@ -642,7 +644,7 @@ public class TestClientNoCluster extends Configured implements Tool { * Comparator for meta row keys. */ private static class MetaRowsComparator implements Comparator<byte[]> { - private final KeyValue.KVComparator delegate = new KeyValue.MetaComparator(); + private final CellComparator delegate = CellComparator.META_COMPARATOR; @Override public int compare(byte[] left, byte[] right) { return delegate.compareRows(left, 0, left.length, right, 0, right.length); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestOperation.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestOperation.java index 96c4190d..37890f9 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestOperation.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestOperation.java @@ -29,6 +29,7 @@ import java.util.List; import java.util.Map; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; @@ -387,7 +388,7 @@ public class TestOperation { Assert.assertEquals(1984L, c.get(0).getTimestamp()); Assert.assertArrayEquals(VALUE, CellUtil.cloneValue(c.get(0))); Assert.assertEquals(HConstants.LATEST_TIMESTAMP, p.getTimeStamp()); - Assert.assertEquals(0, KeyValue.COMPARATOR.compare(c.get(0), new KeyValue(c.get(0)))); + Assert.assertEquals(0, CellComparator.COMPARATOR.compare(c.get(0), new KeyValue(c.get(0)))); p = new Put(ROW); p.add(FAMILY, ByteBuffer.wrap(QUALIFIER), 2013L, null); @@ -396,7 +397,7 @@ Assert.assertEquals(2013L, c.get(0).getTimestamp()); Assert.assertArrayEquals(new byte[]{}, CellUtil.cloneValue(c.get(0))); Assert.assertEquals(HConstants.LATEST_TIMESTAMP, p.getTimeStamp()); - Assert.assertEquals(0, KeyValue.COMPARATOR.compare(c.get(0), new KeyValue(c.get(0)))); + Assert.assertEquals(0, CellComparator.COMPARATOR.compare(c.get(0), new KeyValue(c.get(0)))); p = new Put(ByteBuffer.wrap(ROW)); p.add(FAMILY, ByteBuffer.wrap(QUALIFIER), 2001L, null); @@ -406,7 +407,7 @@
Assert.assertArrayEquals(new byte[]{}, CellUtil.cloneValue(c.get(0))); Assert.assertArrayEquals(ROW, CellUtil.cloneRow(c.get(0))); Assert.assertEquals(HConstants.LATEST_TIMESTAMP, p.getTimeStamp()); - Assert.assertEquals(0, KeyValue.COMPARATOR.compare(c.get(0), new KeyValue(c.get(0)))); + Assert.assertEquals(0, CellComparator.COMPARATOR.compare(c.get(0), new KeyValue(c.get(0)))); p = new Put(ByteBuffer.wrap(ROW), 1970L); p.add(FAMILY, ByteBuffer.wrap(QUALIFIER), 2001L, null); @@ -416,7 +417,7 @@ Assert.assertArrayEquals(new byte[]{}, CellUtil.cloneValue(c.get(0))); Assert.assertArrayEquals(ROW, CellUtil.cloneRow(c.get(0))); Assert.assertEquals(1970L, p.getTimeStamp()); - Assert.assertEquals(0, KeyValue.COMPARATOR.compare(c.get(0), new KeyValue(c.get(0)))); + Assert.assertEquals(0, CellComparator.COMPARATOR.compare(c.get(0), new KeyValue(c.get(0)))); } @Test diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparator.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparator.java index 540c967..4be7120 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparator.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparator.java @@ -19,8 +19,12 @@ package org.apache.hadoop.hbase; import java.io.Serializable; +import java.util.Arrays; import java.util.Comparator; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.KeyValue.RawBytesComparator; import org.apache.hadoop.hbase.KeyValue.Type; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; @@ -41,24 +45,75 @@ import com.google.common.primitives.Longs; @InterfaceAudience.Private @InterfaceStability.Evolving public class CellComparator implements Comparator<Cell>, Serializable { + static final Log LOG = LogFactory.getLog(CellComparator.class); private static final long serialVersionUID = -8760041766259623329L; + /** + * Comparator for plain key/values; i.e. non-catalog table key/values. Works on Key portion + * of KeyValue only. + */ + public static final CellComparator COMPARATOR = new CellComparator(); + /** + * A {@link CellComparator} for hbase:meta catalog table + * {@link KeyValue}s. + */ + public static final CellComparator META_COMPARATOR = new MetaCellComparator(); + + /** + * Needed for Bloom Filters. + */ + public static final CellComparator RAW_COMPARATOR = new RawBytesComparator(); + @Override public int compare(Cell a, Cell b) { return compare(a, b, false); } /** + * Compares only the key portion of a cell.
It does not include the sequence id/mvcc of the + cell + * @param left + * @param right + * @return an int greater than 0 if left is greater than right + * less than 0 if left is less than right + * equal to 0 if left is equal to right + */ + public final int compareOnlyKeyPortion(Cell left, Cell right) { + return compare(left, right, true); + } + + /** + * Used when a cell needs to be compared with a key byte[], such as when + * finding the index entry from the index block or bloom keys from the bloom blocks + * @param left + * @param key + * @param offset + * @param length + * @return an int greater than 0 if left is greater than right + * less than 0 if left is less than right + * equal to 0 if left is equal to right + */ + public final int compare(Cell left, byte[] key, int offset, int length) { + // row + short rrowlength = Bytes.toShort(key, offset); + int c = compareRows(left, key, offset + Bytes.SIZEOF_SHORT, rrowlength); + if (c != 0) return c; + + // Compare the rest of the two KVs without making any assumptions about + // the common prefix. This function will not compare rows anyway, so we + // don't need to tell it that the common prefix includes the row. + return compareWithoutRow(left, key, offset, length, rrowlength); + } + + /** * Compare cells. - * TODO: Replace with dynamic rather than static comparator so can change comparator - * implementation. * @param a * @param b * @param ignoreSequenceid True if we are to compare the key portion only and ignore * the sequenceid. Set to false to compare key and consider sequenceid. * @return 0 if equal, -1 if a < b, and +1 if a > b. */ - public static int compare(final Cell a, final Cell b, boolean ignoreSequenceid) { + public final int compare(final Cell a, final Cell b, boolean ignoreSequenceid) { // row int c = compareRows(a, b); if (c != 0) return c; @@ -75,302 +130,367 @@ } } - public static int findCommonPrefixInRowPart(Cell left, Cell right, int rowCommonPrefix) { - return findCommonPrefix(left.getRowArray(), right.getRowArray(), left.getRowLength() - - rowCommonPrefix, right.getRowLength() - rowCommonPrefix, left.getRowOffset() - + rowCommonPrefix, right.getRowOffset() + rowCommonPrefix); + public final static int compareColumns(final Cell left, final Cell right) { + int lfoffset = left.getFamilyOffset(); + int rfoffset = right.getFamilyOffset(); + int lclength = left.getQualifierLength(); + int rclength = right.getQualifierLength(); + int lfamilylength = left.getFamilyLength(); + int rfamilylength = right.getFamilyLength(); + int diff = compareFamilies(left.getFamilyArray(), lfoffset, lfamilylength, + right.getFamilyArray(), rfoffset, rfamilylength); + if (diff != 0) { + return diff; + } else { + return compareQualifiers(left.getQualifierArray(), left.getQualifierOffset(), lclength, + right.getQualifierArray(), right.getQualifierOffset(), rclength); + } } - private static int findCommonPrefix(byte[] left, byte[] right, int leftLength, int rightLength, - int leftOffset, int rightOffset) { - int length = Math.min(leftLength, rightLength); - int result = 0; - - while (result < length && left[leftOffset + result] == right[rightOffset + result]) { - result++; + // More API variations can come based on needs - this is mainly to avoid parsing the + // offsets and lengths again from the cells + public final static int compareColumns(final Cell left, int lfamilyOffset, int lfamilylength, + int lqualOffset, int lQualLength, final Cell right, final int
rfamilyOffset, + final int rfamilylength, final int rqualOffset, int rqualLength) { + int diff = compareFamilies(left.getFamilyArray(), lfamilyOffset, lfamilylength, + right.getFamilyArray(), rfamilyOffset, rfamilylength); + if (diff != 0) { + return diff; + } else { + return compareQualifiers(left.getQualifierArray(), lqualOffset, lQualLength, + right.getQualifierArray(), rqualOffset, rqualLength); } - return result; } - - public static int findCommonPrefixInFamilyPart(Cell left, Cell right, int familyCommonPrefix) { - return findCommonPrefix(left.getFamilyArray(), right.getFamilyArray(), left.getFamilyLength() - - familyCommonPrefix, right.getFamilyLength() - familyCommonPrefix, left.getFamilyOffset() - + familyCommonPrefix, right.getFamilyOffset() + familyCommonPrefix); + + // More API variations can come based on needs - this is mainly to avoid parsing the + // offsets and lengths again from the cells + public final static int compareColumns(final Cell left, final int lfamilyOffset, + final int lfamilylength, final int lqualOffset, final int lQualLength, final byte[] right, + final int rfamilyOffset, final int rfamilylength, final int rqualOffset, + final int rqualLength) { + int diff = compareFamilies(left.getFamilyArray(), lfamilyOffset, lfamilylength, right, + rfamilyOffset, rfamilylength); + if (diff != 0) { + return diff; + } else { + return compareQualifiers(left.getQualifierArray(), lqualOffset, lQualLength, right, + rqualOffset, rqualLength); + } } - public static int findCommonPrefixInQualifierPart(Cell left, Cell right, - int qualifierCommonPrefix) { - return findCommonPrefix(left.getQualifierArray(), right.getQualifierArray(), - left.getQualifierLength() - qualifierCommonPrefix, right.getQualifierLength() - - qualifierCommonPrefix, left.getQualifierOffset() + qualifierCommonPrefix, - right.getQualifierOffset() + qualifierCommonPrefix); + public final static int compareFamilies(Cell left, Cell right) { + return compareFamilies(left.getFamilyArray(), left.getFamilyOffset(), left.getFamilyLength(), + right.getFamilyArray(), right.getFamilyOffset(), right.getFamilyLength()); } - /**************** equals ****************************/ + private final static int compareFamilies(Cell left, int lOffset, int lLength, Cell right, + int rOffset, int rLength) { + return compareFamilies(left.getFamilyArray(), lOffset, lLength, right.getFamilyArray(), + rOffset, rLength); + } - public static boolean equals(Cell a, Cell b){ - return equalsRow(a, b) - && equalsFamily(a, b) - && equalsQualifier(a, b) - && equalsTimestamp(a, b) - && equalsType(a, b); + public final static int compareFamilies(Cell left, int lOffset, int lLength, byte[] right, + int rOffset, int rLength) { + return compareFamilies(left.getFamilyArray(), lOffset, lLength, right, rOffset, rLength); } - public static boolean equalsRow(Cell a, Cell b){ - return Bytes.equals( - a.getRowArray(), a.getRowOffset(), a.getRowLength(), - b.getRowArray(), b.getRowOffset(), b.getRowLength()); + private final static int compareFamilies(byte[] leftFamily, int lFamOffset, int lFamLength, + byte[] rightFamily, int rFamOffset, int rFamLen) { + return Bytes.compareTo(leftFamily, lFamOffset, lFamLength, rightFamily, rFamOffset, rFamLen); } - public static boolean equalsFamily(Cell a, Cell b){ - return Bytes.equals( - a.getFamilyArray(), a.getFamilyOffset(), a.getFamilyLength(), - b.getFamilyArray(), b.getFamilyOffset(), b.getFamilyLength()); + public final static int compareQualifiers(Cell left, Cell right) { + return
compareQualifiers(left.getQualifierArray(), left.getQualifierOffset(), + left.getQualifierLength(), right.getQualifierArray(), right.getQualifierOffset(), + right.getQualifierLength()); } - public static boolean equalsQualifier(Cell a, Cell b){ - return Bytes.equals( - a.getQualifierArray(), a.getQualifierOffset(), a.getQualifierLength(), - b.getQualifierArray(), b.getQualifierOffset(), b.getQualifierLength()); + // Here we may need APIs which will explicitly accept qualOffset, qualLength to avoid reparsing + // from the Cell + private final static int compareQualifiers(Cell left, int lOffset, int lLength, Cell right, + int rOffset, int rLength) { + return compareQualifiers(left.getQualifierArray(), lOffset, + lLength, right.getQualifierArray(), rOffset, + rLength); } - public static boolean equalsTimestamp(Cell a, Cell b){ - return a.getTimestamp() == b.getTimestamp(); + // Here we may need APIs which will explicitly accept qualOffset, qualLength to avoid reparsing + // from the Cell + public final static int compareQualifiers(Cell left, int lOffset, int lLength, byte[] right, + int rOffset, int rLength) { + return compareQualifiers(left.getQualifierArray(), lOffset, + lLength, right, rOffset, + rLength); } - public static boolean equalsType(Cell a, Cell b){ - return a.getTypeByte() == b.getTypeByte(); + private static int compareQualifiers(byte[] leftCol, int lColOffset, int lColLength, + byte[] rightCol, int rColOffset, int rColLength) { + return Bytes.compareTo(leftCol, lColOffset, lColLength, rightCol, rColOffset, rColLength); } - public static int compareColumns(final Cell left, final Cell right) { - int lfoffset = left.getFamilyOffset(); - int rfoffset = right.getFamilyOffset(); - int lclength = left.getQualifierLength(); - int rclength = right.getQualifierLength(); - int lfamilylength = left.getFamilyLength(); - int rfamilylength = right.getFamilyLength(); - int diff = compare(left.getFamilyArray(), lfoffset, lfamilylength, right.getFamilyArray(), - rfoffset, rfamilylength); - if (diff != 0) { - return diff; - } else { - return compare(left.getQualifierArray(), left.getQualifierOffset(), lclength, - right.getQualifierArray(), right.getQualifierOffset(), rclength); + /** + * Compare columnFamily, qualifier, timestamp, and key type (everything + * except the row). This method is used both in the normal comparator and + * the "same-prefix" comparator. Note that we are assuming that row portions + * of both KVs have already been parsed and found identical, and we don't + * validate that assumption here. + * @param commonPrefix + * the length of the common prefix of the two key-values being + * compared, including row length and row + */ + // we will get one more API with BB here + private final int compareWithoutRow(Cell left, + byte[] right, int roffset, int rlength, short rowlength) { + /*** + * KeyValue Format and commonLength: + * |_keyLen_|_valLen_|_rowLen_|_rowKey_|_famiLen_|_fami_|_Quali_|.... + * ------------------|-------commonLength--------|-------------- + */ + int commonLength = KeyValue.ROW_LENGTH_SIZE + KeyValue.FAMILY_LENGTH_SIZE + rowlength; + + // commonLength + TIMESTAMP_TYPE_SIZE + int commonLengthWithTSAndType = KeyValue.TIMESTAMP_TYPE_SIZE + commonLength; + // ColumnFamily + Qualifier length.
+ int lcolumnlength = left.getFamilyLength() + left.getQualifierLength(); + int rcolumnlength = rlength - commonLengthWithTSAndType; + + byte ltype = left.getTypeByte(); + byte rtype = right[roffset + (rlength - 1)]; + + // If the column is not specified, the "minimum" key type appears the + // latest in the sorted order, regardless of the timestamp. This is used + // for specifying the last key/value in a given row, because there is no + // "lexicographically last column" (it would be infinitely long). The + // "maximum" key type does not need this behavior. + if (lcolumnlength == 0 && ltype == Type.Minimum.getCode()) { + // left is "bigger", i.e. it appears later in the sorted order + return 1; + } + if (rcolumnlength == 0 && rtype == Type.Minimum.getCode()) { + return -1; } - } - public static int compareFamilies(Cell left, Cell right) { - return Bytes.compareTo(left.getFamilyArray(), left.getFamilyOffset(), left.getFamilyLength(), - right.getFamilyArray(), right.getFamilyOffset(), right.getFamilyLength()); - } + int lfamilyoffset = left.getFamilyOffset(); + int rfamilyoffset = commonLength + roffset; - public static int compareQualifiers(Cell left, Cell right) { - return Bytes.compareTo(left.getQualifierArray(), left.getQualifierOffset(), - left.getQualifierLength(), right.getQualifierArray(), right.getQualifierOffset(), - right.getQualifierLength()); - } + // Column family length. + int lfamilylength = left.getFamilyLength(); + int rfamilylength = right[rfamilyoffset - 1]; + // If left family size is not equal to right family size, we need not + // compare the qualifiers. + boolean sameFamilySize = (lfamilylength == rfamilylength); + if (!sameFamilySize) { + // comparing column family is enough. + return compareFamilies(left, lfamilyoffset, lfamilylength, right, + rfamilyoffset, rfamilylength); + } + // Compare family & qualifier together. + // Families are same. Compare on qualifiers. + int lQualOffset = left.getQualifierOffset(); + int lQualLength = left.getQualifierLength(); + int comparison = compareColumns(left, lfamilyoffset, lfamilylength, lQualOffset, lQualLength, + right, rfamilyoffset, rfamilylength, rfamilyoffset + rfamilylength, + (rcolumnlength - rfamilylength)); + if (comparison != 0) { + return comparison; + } - public int compareFlatKey(Cell left, Cell right) { - int compare = compareRows(left, right); + // // + // Next compare timestamps. + long rtimestamp = Bytes.toLong(right, roffset + (rlength - KeyValue.TIMESTAMP_TYPE_SIZE)); + int compare = CellUtil.compareTimestamps(left.getTimestamp(), rtimestamp); if (compare != 0) { return compare; } - return compareWithoutRow(left, right); + + // Compare types. Let the delete types sort ahead of puts; i.e. types + // of higher numbers sort before those of lesser numbers. Maximum (255) + // appears ahead of everything, and minimum (0) appears after + // everything. + return (0xff & rtype) - (0xff & ltype); } /** * Do not use comparing rows from hbase:meta. Meta table Cells have schema (table,startrow,hash) * so can't be treated as plain byte arrays as this method does. 
*/ - public static int compareRows(final Cell left, final Cell right) { - return Bytes.compareTo(left.getRowArray(), left.getRowOffset(), left.getRowLength(), + public final int compareRows(final Cell left, final Cell right) { + return compareRows(left.getRowArray(), left.getRowOffset(), left.getRowLength(), right.getRowArray(), right.getRowOffset(), right.getRowLength()); } + public final int compareRows(Cell left, int loffset, int llength, Cell right, int roffset, + int rlength) { + // TODO : for BB based cells all the hasArray based checks would happen + // here. But we may have + // to end up in multiple APIs accepting byte[] and BBs + return compareRows(left.getRowArray(), loffset, llength, right.getRowArray(), roffset, + rlength); + } + + public final int compareRows(Cell left, byte[] right, int roffset, + int rlength) { + // TODO : for BB based cells all the hasArray based checks would happen + // here. But we may have + // to end up in multiple APIs accepting byte[] and BBs + return compareRows(left.getRowArray(), left.getRowOffset(), left.getRowLength(), right, + roffset, rlength); + } /** * Do not use comparing rows from hbase:meta. Meta table Cells have schema (table,startrow,hash) * so can't be treated as plain byte arrays as this method does. */ - public static int compareRows(byte[] left, int loffset, int llength, byte[] right, int roffset, + // TODO : CLEANUP : in order to do this we may have to modify some code + // HRegion.next() and will involve a + // Filter API change also. Better to do that later along with + // HBASE-11425/HBASE-13387. + public int compareRows(byte[] left, int loffset, int llength, byte[] right, int roffset, int rlength) { return Bytes.compareTo(left, loffset, llength, right, roffset, rlength); } - public static int compareWithoutRow(final Cell leftCell, final Cell rightCell) { + private static int compareWithoutRow(final Cell left, final Cell right) { // If the column is not specified, the "minimum" key type appears the // latest in the sorted order, regardless of the timestamp. This is used // for specifying the last key/value in a given row, because there is no // "lexicographically last column" (it would be infinitely long). The // "maximum" key type does not need this behavior. // Copied from KeyValue. This is bad in that we can't do memcmp w/ special rules like this. - // TODO - if (leftCell.getFamilyLength() + leftCell.getQualifierLength() == 0 - && leftCell.getTypeByte() == Type.Minimum.getCode()) { + int lFamLength = left.getFamilyLength(); + int rFamLength = right.getFamilyLength(); + int lQualLength = left.getQualifierLength(); + int rQualLength = right.getQualifierLength(); + if (lFamLength + lQualLength == 0 + && left.getTypeByte() == Type.Minimum.getCode()) { // left is "bigger", i.e. it appears later in the sorted order return 1; } - if (rightCell.getFamilyLength() + rightCell.getQualifierLength() == 0 - && rightCell.getTypeByte() == Type.Minimum.getCode()) { + if (rFamLength + rQualLength == 0 + && right.getTypeByte() == Type.Minimum.getCode()) { return -1; } - boolean sameFamilySize = (leftCell.getFamilyLength() == rightCell.getFamilyLength()); + boolean sameFamilySize = (lFamLength == rFamLength); + int lFamOffset = left.getFamilyOffset(); + int rFamOffset = right.getFamilyOffset(); if (!sameFamilySize) { // comparing column family is enough. 
- - return Bytes.compareTo(leftCell.getFamilyArray(), leftCell.getFamilyOffset(), - leftCell.getFamilyLength(), rightCell.getFamilyArray(), rightCell.getFamilyOffset(), - rightCell.getFamilyLength()); + return compareFamilies(left, lFamOffset, lFamLength, right, rFamOffset, rFamLength); } - int diff = compareColumns(leftCell, rightCell); + // Families are same. Compare on qualifiers. + int lQualOffset = left.getQualifierOffset(); + int rQualOffset = right.getQualifierOffset(); + int diff = compareColumns(left, lFamOffset, lFamLength, lQualOffset, lQualLength, right, + rFamOffset, rFamLength, rQualOffset, rQualLength); if (diff != 0) return diff; - diff = compareTimestamps(leftCell, rightCell); + diff = compareTimestamps(left, right); if (diff != 0) return diff; // Compare types. Let the delete types sort ahead of puts; i.e. types // of higher numbers sort before those of lesser numbers. Maximum (255) // appears ahead of everything, and minimum (0) appears after // everything. - return (0xff & rightCell.getTypeByte()) - (0xff & leftCell.getTypeByte()); + return (0xff & right.getTypeByte()) - (0xff & left.getTypeByte()); } public static int compareTimestamps(final Cell left, final Cell right) { - long ltimestamp = left.getTimestamp(); - long rtimestamp = right.getTimestamp(); - return compareTimestamps(ltimestamp, rtimestamp); + return CellUtil.compareTimestamps(left.getTimestamp(), right.getTimestamp()); } - /********************* hashCode ************************/ - /** - * Returns a hash code that is always the same for two Cells having a matching equals(..) result. + * Used to compare two cells based on the hint provided + * @param nextIndexedCell the next indexed cell + * @param currentCell the cell to be compared + * @param foff the family offset of the currentCell + * @param flen the family length of the currentCell + * @param colHint the column hint provided - could be null + * @param coff the offset of the column hint if provided, if not offset of the currentCell's + * qualifier + * @param clen the length of the column hint if provided, if not length of the currentCell's + * qualifier + * @param ts the timestamp to be seeked + * @param type the type to be seeked + * @return an int based on the given column hint */ - public static int hashCode(Cell cell){ - if (cell == null) {// return 0 for empty Cell - return 0; + // compare a key against row/fam/qual/ts/type + public final int compareKeyBasedOnColHint(Cell nextIndexedCell, Cell currentCell, int foff, + int flen, byte[] colHint, int coff, int clen, long ts, byte type) { + + int compare = 0; + compare = compareRows(nextIndexedCell, nextIndexedCell.getRowOffset(), + nextIndexedCell.getRowLength(), currentCell, currentCell.getRowOffset(), + currentCell.getRowLength()); + if (compare != 0) { + return compare; } - - int hash = calculateHashForKeyValue(cell); - hash = 31 * hash + (int)cell.getMvccVersion(); - return hash; - } - - /** - * Returns a hash code that is always the same for two Cells having a matching - * equals(..) result. Note : Ignore mvcc while calculating the hashcode - * - * @param cell - * @return hashCode - */ - public static int hashCodeIgnoreMvcc(Cell cell) { - if (cell == null) {// return 0 for empty Cell - return 0; + // If the column is not specified, the "minimum" key type appears the + // latest in the sorted order, regardless of the timestamp. This is used + // for specifying the last key/value in a given row, because there is no + // "lexicographically last column" (it would be infinitely long). 
The + // "maximum" key type does not need this behavior. + if (nextIndexedCell.getFamilyLength() + nextIndexedCell.getQualifierLength() == 0 + && nextIndexedCell.getTypeByte() == Type.Minimum.getCode()) { + // left is "bigger", i.e. it appears later in the sorted order + return 1; + } + int qualLen = currentCell.getQualifierLength(); + if (flen + clen == 0 && type == Type.Minimum.getCode()) { + return -1; } - int hash = calculateHashForKeyValue(cell); - return hash; - } - - private static int calculateHashForKeyValue(Cell cell) { - //pre-calculate the 3 hashes made of byte ranges - int rowHash = Bytes.hashCode(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength()); - int familyHash = - Bytes.hashCode(cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength()); - int qualifierHash = Bytes.hashCode(cell.getQualifierArray(), cell.getQualifierOffset(), - cell.getQualifierLength()); - - //combine the 6 sub-hashes - int hash = 31 * rowHash + familyHash; - hash = 31 * hash + qualifierHash; - hash = 31 * hash + (int)cell.getTimestamp(); - hash = 31 * hash + cell.getTypeByte(); - return hash; - } - - - /******************** lengths *************************/ - - public static boolean areKeyLengthsEqual(Cell a, Cell b) { - return a.getRowLength() == b.getRowLength() - && a.getFamilyLength() == b.getFamilyLength() - && a.getQualifierLength() == b.getQualifierLength(); - } + compare = compareFamilies(nextIndexedCell, nextIndexedCell.getFamilyOffset(), + nextIndexedCell.getFamilyLength(), currentCell, currentCell.getFamilyOffset(), + flen); + if (compare != 0) { + return compare; + } + if (colHint == null) { + compare = compareQualifiers(nextIndexedCell, nextIndexedCell.getQualifierOffset(), + nextIndexedCell.getQualifierLength(), currentCell, currentCell.getQualifierOffset(), + qualLen); + } else { + compare = compareQualifiers(nextIndexedCell, nextIndexedCell.getQualifierOffset(), + nextIndexedCell.getQualifierLength(), colHint, coff, clen); + } + if (compare != 0) { + return compare; + } + // Next compare timestamps. + compare = CellUtil.compareTimestamps(nextIndexedCell.getTimestamp(), ts); + if (compare != 0) { + return compare; + } - public static boolean areRowLengthsEqual(Cell a, Cell b) { - return a.getRowLength() == b.getRowLength(); + // Compare types. Let the delete types sort ahead of puts; i.e. types + // of higher numbers sort before those of lesser numbers. Maximum (255) + // appears ahead of everything, and minimum (0) appears after + // everything. + return (0xff & type) - (0xff & nextIndexedCell.getTypeByte()); } - /*********************common prefixes*************************/ - - private static int compare(byte[] left, int leftOffset, int leftLength, byte[] right, - int rightOffset, int rightLength) { - return Bytes.compareTo(left, leftOffset, leftLength, right, rightOffset, rightLength); - } - + // Having this as static is fine but if META is having DBE then we should change this. 
public static int compareCommonRowPrefix(Cell left, Cell right, int rowCommonPrefix) { - return compare(left.getRowArray(), left.getRowOffset() + rowCommonPrefix, left.getRowLength() - - rowCommonPrefix, right.getRowArray(), right.getRowOffset() + rowCommonPrefix, - right.getRowLength() - rowCommonPrefix); + return Bytes.compareTo(left.getRowArray(), left.getRowOffset() + rowCommonPrefix, + left.getRowLength() - rowCommonPrefix, right.getRowArray(), right.getRowOffset() + + rowCommonPrefix, right.getRowLength() - rowCommonPrefix); } - public static int compareCommonFamilyPrefix(Cell left, Cell right, - int familyCommonPrefix) { - return compare(left.getFamilyArray(), left.getFamilyOffset() + familyCommonPrefix, + public static int compareCommonFamilyPrefix(Cell left, Cell right, int familyCommonPrefix) { + return Bytes.compareTo(left.getFamilyArray(), left.getFamilyOffset() + familyCommonPrefix, left.getFamilyLength() - familyCommonPrefix, right.getFamilyArray(), - right.getFamilyOffset() + familyCommonPrefix, - right.getFamilyLength() - familyCommonPrefix); + right.getFamilyOffset() + familyCommonPrefix, right.getFamilyLength() - familyCommonPrefix); } - public static int compareCommonQualifierPrefix(Cell left, Cell right, - int qualCommonPrefix) { - return compare(left.getQualifierArray(), left.getQualifierOffset() + qualCommonPrefix, + public static int compareCommonQualifierPrefix(Cell left, Cell right, int qualCommonPrefix) { + return Bytes.compareTo(left.getQualifierArray(), left.getQualifierOffset() + qualCommonPrefix, left.getQualifierLength() - qualCommonPrefix, right.getQualifierArray(), right.getQualifierOffset() + qualCommonPrefix, right.getQualifierLength() - qualCommonPrefix); } - /***************** special cases ****************************/ - /** - * special case for KeyValue.equals - */ - public static boolean equalsIgnoreMvccVersion(Cell a, Cell b){ - return 0 == compareStaticIgnoreMvccVersion(a, b); - } - - private static int compareStaticIgnoreMvccVersion(Cell a, Cell b) { - // row - int c = compareRows(a, b); - if (c != 0) return c; - - // family - c = compareColumns(a, b); - if (c != 0) return c; - - // timestamp: later sorts first - c = compareTimestamps(a, b); - if (c != 0) return c; - - //type - c = (0xff & b.getTypeByte()) - (0xff & a.getTypeByte()); - return c; - } - - private static int compareTimestamps(final long ltimestamp, final long rtimestamp) { - // The below older timestamps sorting ahead of newer timestamps looks - // wrong but it is intentional. This way, newer timestamps are first - // found when we iterate over a memstore and newer versions are the - // first we trip over when reading from a store file. - if (ltimestamp < rtimestamp) { - return 1; - } else if (ltimestamp > rtimestamp) { - return -1; - } - return 0; - } - /** - * Counter part for the KeyValue.RowOnlyComparator + * Comparator that compares row component only of a Cell */ public static class RowComparator extends CellComparator { @Override @@ -380,119 +500,91 @@ public class CellComparator implements Comparator, Serializable { } /** - * Try to return a Cell that falls between left and right but that is - * shorter; i.e. takes up less space. This trick is used building HFile block index. - * Its an optimization. It does not always work. In this case we'll just return the - * right cell. - * @param comparator Comparator to use. - * @param left - * @param right - * @return A cell that sorts between left and right. + * The HFileV2 file format's trailer contains this class name. 
We reinterpret this and + * instantiate the appropriate comparator. + * TODO: With V3 consider removing this. + * @return legacy class name for FixedFileTrailer#comparatorClassName */ - public static Cell getMidpoint(final KeyValue.KVComparator comparator, final Cell left, - final Cell right) { - // TODO: Redo so only a single pass over the arrays rather than one to compare and then a - // second composing midpoint. - if (right == null) { - throw new IllegalArgumentException("right cell can not be null"); - } - if (left == null) { - return right; - } - // If Cells from meta table, don't mess around. meta table Cells have schema - // (table,startrow,hash) so can't be treated as plain byte arrays. Just skip out without - // trying to do this optimization. - if (comparator != null && comparator instanceof KeyValue.MetaComparator) { - return right; - } - int diff = compareRows(left, right); - if (diff > 0) { - throw new IllegalArgumentException("Left row sorts after right row; left=" + - CellUtil.getCellKeyAsString(left) + ", right=" + CellUtil.getCellKeyAsString(right)); - } - if (diff < 0) { - // Left row is < right row. - byte [] midRow = getMinimumMidpointArray(left.getRowArray(), left.getRowOffset(), - left.getRowLength(), - right.getRowArray(), right.getRowOffset(), right.getRowLength()); - // If midRow is null, just return 'right'. Can't do optimization. - if (midRow == null) return right; - return CellUtil.createCell(midRow); - } - // Rows are same. Compare on families. - diff = compareFamilies(left, right); - if (diff > 0) { - throw new IllegalArgumentException("Left family sorts after right family; left=" + - CellUtil.getCellKeyAsString(left) + ", right=" + CellUtil.getCellKeyAsString(right)); - } - if (diff < 0) { - byte [] midRow = getMinimumMidpointArray(left.getFamilyArray(), left.getFamilyOffset(), - left.getFamilyLength(), - right.getFamilyArray(), right.getFamilyOffset(), right.getFamilyLength()); - // If midRow is null, just return 'right'. Can't do optimization. - if (midRow == null) return right; - // Return new Cell where we use right row and then a mid sort family. - return CellUtil.createCell(right.getRowArray(), right.getRowOffset(), right.getRowLength(), - midRow, 0, midRow.length, HConstants.EMPTY_BYTE_ARRAY, 0, - HConstants.EMPTY_BYTE_ARRAY.length); - } - // Families are same. Compare on qualifiers. - diff = compareQualifiers(left, right); - if (diff > 0) { - throw new IllegalArgumentException("Left qualifier sorts after right qualifier; left=" + - CellUtil.getCellKeyAsString(left) + ", right=" + CellUtil.getCellKeyAsString(right)); - } - if (diff < 0) { - byte [] midRow = getMinimumMidpointArray(left.getQualifierArray(), left.getQualifierOffset(), - left.getQualifierLength(), - right.getQualifierArray(), right.getQualifierOffset(), right.getQualifierLength()); - // If midRow is null, just return 'right'. Can't do optimization. - if (midRow == null) return right; - // Return new Cell where we use right row and family and then a mid sort qualifier. - return CellUtil.createCell(right.getRowArray(), right.getRowOffset(), right.getRowLength(), - right.getFamilyArray(), right.getFamilyOffset(), right.getFamilyLength(), - midRow, 0, midRow.length); - } - // No opportunity for optimization. Just return right key. - return right; + public String getLegacyKeyComparatorName() { + // The path of this comparator is weird. Do we really need to change this?
+ // There is a TODO above to get rid of this + return "org.apache.hadoop.hbase.KeyValue$KeyComparator"; } /** - * @param leftArray - * @param leftOffset - * @param leftLength - * @param rightArray - * @param rightOffset - * @param rightLength - * @return Return a new array that is between left and right and minimally sized else just return - * null as indicator that we could not create a mid point. + * A {@link CellComparator} for hbase:meta catalog table + * {@link KeyValue}s. */ - private static byte [] getMinimumMidpointArray(final byte [] leftArray, final int leftOffset, - final int leftLength, - final byte [] rightArray, final int rightOffset, final int rightLength) { - // rows are different - int minLength = leftLength < rightLength ? leftLength : rightLength; - short diffIdx = 0; - while (diffIdx < minLength && - leftArray[leftOffset + diffIdx] == rightArray[rightOffset + diffIdx]) { - diffIdx++; - } - byte [] minimumMidpointArray = null; - if (diffIdx >= minLength) { - // leftKey's row is prefix of rightKey's. - minimumMidpointArray = new byte[diffIdx + 1]; - System.arraycopy(rightArray, rightOffset, minimumMidpointArray, 0, diffIdx + 1); - } else { - int diffByte = leftArray[leftOffset + diffIdx]; - if ((0xff & diffByte) < 0xff && (diffByte + 1) < (rightArray[rightOffset + diffIdx] & 0xff)) { - minimumMidpointArray = new byte[diffIdx + 1]; - System.arraycopy(leftArray, leftOffset, minimumMidpointArray, 0, diffIdx); - minimumMidpointArray[diffIdx] = (byte) (diffByte + 1); + public static class MetaCellComparator extends CellComparator { + + @Override + public int compareRows(byte[] left, int loffset, int llength, byte[] right, int roffset, + int rlength) { + int leftDelimiter = Bytes.searchDelimiterIndex(left, loffset, llength, HConstants.DELIMITER); + int rightDelimiter = Bytes + .searchDelimiterIndex(right, roffset, rlength, HConstants.DELIMITER); + // Compare up to the delimiter + int lpart = (leftDelimiter < 0 ? llength : leftDelimiter - loffset); + int rpart = (rightDelimiter < 0 ? rlength : rightDelimiter - roffset); + int result = Bytes.compareTo(left, loffset, lpart, right, roffset, rpart); + if (result != 0) { + return result; } else { - minimumMidpointArray = new byte[diffIdx + 1]; - System.arraycopy(rightArray, rightOffset, minimumMidpointArray, 0, diffIdx + 1); + if (leftDelimiter < 0 && rightDelimiter >= 0) { + return -1; + } else if (rightDelimiter < 0 && leftDelimiter >= 0) { + return 1; + } else if (leftDelimiter < 0 && rightDelimiter < 0) { + return 0; + } } + // Compare middle bit of the row. + // Move past delimiter + leftDelimiter++; + rightDelimiter++; + int leftFarDelimiter = Bytes.searchDelimiterIndexInReverse(left, leftDelimiter, llength + - (leftDelimiter - loffset), HConstants.DELIMITER); + int rightFarDelimiter = Bytes.searchDelimiterIndexInReverse(right, rightDelimiter, rlength + - (rightDelimiter - roffset), HConstants.DELIMITER); + // Now compare middlesection of row. + lpart = (leftFarDelimiter < 0 ? llength + loffset : leftFarDelimiter) - leftDelimiter; + rpart = (rightFarDelimiter < 0 ? rlength + roffset : rightFarDelimiter) - rightDelimiter; + result = super.compareRows(left, leftDelimiter, lpart, right, rightDelimiter, rpart); + if (result != 0) { + return result; + } else { + if (leftDelimiter < 0 && rightDelimiter >= 0) { + return -1; + } else if (rightDelimiter < 0 && leftDelimiter >= 0) { + return 1; + } else if (leftDelimiter < 0 && rightDelimiter < 0) { + return 0; + } + } + // Compare last part of row, the rowid. 
+ leftFarDelimiter++; + rightFarDelimiter++; + result = Bytes.compareTo(left, leftFarDelimiter, llength - (leftFarDelimiter - loffset), + right, rightFarDelimiter, rlength - (rightFarDelimiter - roffset)); + return result; + } + + /** + * Don't do any fancy Block Index splitting tricks. + */ + public byte[] getShortMidpointKey(final byte[] leftKey, final byte[] rightKey) { + return Arrays.copyOf(rightKey, rightKey.length); + } + + /** + * The HFileV2 file format's trailer contains this class name. We + * reinterpret this and instantiate the appropriate comparator. TODO: With + * V3 consider removing this. + * + * @return legacy class name for FixedFileTrailer#comparatorClassName + */ + public String getLegacyKeyComparatorName() { + return "org.apache.hadoop.hbase.KeyValue$MetaKeyComparator"; + } - } } \ No newline at end of file diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java index bce3957..9a24084 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java @@ -26,6 +26,7 @@ import java.util.List; import java.util.Map.Entry; import java.util.NavigableMap; +import org.apache.hadoop.hbase.CellComparator.MetaCellComparator; import org.apache.hadoop.hbase.KeyValue.Type; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; @@ -419,7 +420,7 @@ public final class CellUtil { left.getQualifierLength(), buf, 0, buf.length); } - public static boolean matchingQualifier(final Cell left, final byte[] buf, final int offset, + private static boolean matchingQualifier(final Cell left, final byte[] buf, final int offset, final int length) { if (buf == null) { return left.getQualifierLength() == 0; @@ -902,4 +903,309 @@ return builder.toString(); } + + /***************** special cases ****************************/ + + /** + * special case for Cell.equals + */ + public static boolean equalsIgnoreMvccVersion(Cell a, Cell b) { + // row + boolean res = matchingRow(a, b); + if (!res) + return res; + + // family and qualifier + res = matchingColumn(a, b); + if (!res) + return res; + + // timestamp: later sorts first + long ltimestamp = a.getTimestamp(); + long rtimestamp = b.getTimestamp(); + int c = compareTimestamps(ltimestamp, rtimestamp); + if (c != 0) + return false; + + // type + c = (0xff & b.getTypeByte()) - (0xff & a.getTypeByte()); + if (c != 0) + return false; + else return true; + } + + public static int compareTimestamps(final long ltimestamp, final long rtimestamp) { + // The below older timestamps sorting ahead of newer timestamps looks + // wrong but it is intentional. This way, newer timestamps are first + // found when we iterate over a memstore and newer versions are the + // first we trip over when reading from a store file.
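As an illustrative aside (not from the patch), a worked example of the inverted ordering described in the comment above, against the compareTimestamps(long, long) method that follows:

    // Descending-timestamp order: the numerically larger (newer) timestamp
    // compares as smaller, so newer cells sort first.
    assert CellUtil.compareTimestamps(5L, 3L) == -1; // newer ts=5 sorts first
    assert CellUtil.compareTimestamps(3L, 5L) == 1;  // older ts=3 sorts after
    assert CellUtil.compareTimestamps(5L, 5L) == 0;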
+    if (ltimestamp < rtimestamp) {
+      return 1;
+    } else if (ltimestamp > rtimestamp) {
+      return -1;
+    }
+    return 0;
+  }
+
+  /**************** equals ****************************/
+
+  public static boolean equals(Cell a, Cell b) {
+    return matchingRow(a, b) && matchingFamily(a, b) && matchingQualifier(a, b)
+        && matchingTimestamp(a, b) && matchingType(a, b);
+  }
+
+  public static boolean matchingTimestamp(Cell a, Cell b) {
+    return a.getTimestamp() == b.getTimestamp();
+  }
+
+  public static boolean matchingType(Cell a, Cell b) {
+    return a.getTypeByte() == b.getTypeByte();
+  }
+
+  /**
+   * Compares the row of two keyvalues for equality
+   *
+   * @param left
+   * @param right
+   * @return True if rows match.
+   */
+  public static boolean matchingRows(final Cell left, final Cell right) {
+    short lrowlength = left.getRowLength();
+    short rrowlength = right.getRowLength();
+    return matchingRows(left, lrowlength, right, rrowlength);
+  }
+
+  /**
+   * @param left
+   * @param lrowlength
+   * @param right
+   * @param rrowlength
+   * @return True if rows match.
+   */
+  private static boolean matchingRows(final Cell left, final short lrowlength, final Cell right,
+      final short rrowlength) {
+    return lrowlength == rrowlength
+        && matchingRows(left.getRowArray(), left.getRowOffset(), lrowlength, right.getRowArray(),
+            right.getRowOffset(), rrowlength);
+  }
+
+  /**
+   * Compare rows. Just calls Bytes.equals, but it's good to have this
+   * encapsulated.
+   *
+   * @param left
+   *          Left row array.
+   * @param loffset
+   *          Left row offset.
+   * @param llength
+   *          Left row length.
+   * @param right
+   *          Right row array.
+   * @param roffset
+   *          Right row offset.
+   * @param rlength
+   *          Right row length.
+   * @return Whether rows are the same row.
+   */
+  private static boolean matchingRows(final byte[] left, final int loffset, final int llength,
+      final byte[] right, final int roffset, final int rlength) {
+    return Bytes.equals(left, loffset, llength, right, roffset, rlength);
+  }
+
+  /**
+   * Compares the row and column of two keyvalues for equality
+   *
+   * @param left
+   * @param right
+   * @return True if same row and column.
+   */
+  public static boolean matchingRowColumn(final Cell left, final Cell right) {
+    short lrowlength = left.getRowLength();
+    short rrowlength = right.getRowLength();
+
+    if ((lrowlength + left.getFamilyLength() + left.getQualifierLength()) != (rrowlength
+        + right.getFamilyLength() + right.getQualifierLength())) {
+      return false;
+    }
+
+    if (!matchingRows(left, lrowlength, right, rrowlength)) {
+      return false;
+    }
+    return matchingColumn(left, right);
+  }
+
+  /**
+   * Try to return a Cell that falls between left and
+   * right but that is shorter; i.e. takes up less space. This
+   * trick is used when building the HFile block index. It's an optimization. It
+   * does not always work. In this case we'll just return the right cell.
+   *
+   * @param comparator
+   *          Comparator to use.
+   * @param left
+   * @param right
+   * @return A cell that sorts between left and right.
+   */
+  public static Cell getMidpoint(final CellComparator comparator, final Cell left,
+      final Cell right) {
+    // TODO: Redo so only a single pass over the arrays rather than one to
+    // compare and then a second composing midpoint.
+    if (right == null) {
+      throw new IllegalArgumentException("right cell can not be null");
+    }
+    if (left == null) {
+      return right;
+    }
+    // If Cells from meta table, don't mess around. meta table Cells have schema
+    // (table,startrow,hash) so can't be treated as plain byte arrays.
Just skip + // out without + // trying to do this optimization. + if (comparator != null && comparator instanceof MetaCellComparator) { + return right; + } + int diff = comparator.compareRows(left, right); + if (diff > 0) { + throw new IllegalArgumentException("Left row sorts after right row; left=" + + CellUtil.getCellKeyAsString(left) + ", right=" + CellUtil.getCellKeyAsString(right)); + } + if (diff < 0) { + // Left row is < right row. + byte[] midRow = getMinimumMidpointArray(left.getRowArray(), left.getRowOffset(), + left.getRowLength(), right.getRowArray(), right.getRowOffset(), right.getRowLength()); + // If midRow is null, just return 'right'. Can't do optimization. + if (midRow == null) + return right; + return CellUtil.createCell(midRow); + } + // Rows are same. Compare on families. + int lFamOffset = left.getFamilyOffset(); + int rFamOffset = right.getFamilyOffset(); + int lFamLength = left.getFamilyLength(); + int rFamLength = right.getFamilyLength(); + diff = CellComparator.compareFamilies(left, right); + if (diff > 0) { + throw new IllegalArgumentException("Left family sorts after right family; left=" + + CellUtil.getCellKeyAsString(left) + ", right=" + CellUtil.getCellKeyAsString(right)); + } + if (diff < 0) { + byte[] midRow = getMinimumMidpointArray(left.getFamilyArray(), lFamOffset, + lFamLength, right.getFamilyArray(), rFamOffset, + rFamLength); + // If midRow is null, just return 'right'. Can't do optimization. + if (midRow == null) + return right; + // Return new Cell where we use right row and then a mid sort family. + return CellUtil.createCell(right.getRowArray(), right.getRowOffset(), right.getRowLength(), + midRow, 0, midRow.length, HConstants.EMPTY_BYTE_ARRAY, 0, + HConstants.EMPTY_BYTE_ARRAY.length); + } + // Families are same. Compare on qualifiers. + diff = CellComparator.compareQualifiers(left, right); + if (diff > 0) { + throw new IllegalArgumentException("Left qualifier sorts after right qualifier; left=" + + CellUtil.getCellKeyAsString(left) + ", right=" + CellUtil.getCellKeyAsString(right)); + } + if (diff < 0) { + byte[] midRow = getMinimumMidpointArray(left.getQualifierArray(), left.getQualifierOffset(), + left.getQualifierLength(), right.getQualifierArray(), right.getQualifierOffset(), + right.getQualifierLength()); + // If midRow is null, just return 'right'. Can't do optimization. + if (midRow == null) + return right; + // Return new Cell where we use right row and family and then a mid sort + // qualifier. + return CellUtil.createCell(right.getRowArray(), right.getRowOffset(), right.getRowLength(), + right.getFamilyArray(), right.getFamilyOffset(), right.getFamilyLength(), midRow, 0, + midRow.length); + } + // No opportunity for optimization. Just return right key. + return right; + } + + /** + * @param leftArray + * @param leftOffset + * @param leftLength + * @param rightArray + * @param rightOffset + * @param rightLength + * @return Return a new array that is between left and right and minimally + * sized else just return null as indicator that we could not create a + * mid point. + */ + private static byte[] getMinimumMidpointArray(final byte[] leftArray, final int leftOffset, + final int leftLength, final byte[] rightArray, final int rightOffset, final int rightLength) { + // rows are different + int minLength = leftLength < rightLength ? 
leftLength : rightLength; + short diffIdx = 0; + while (diffIdx < minLength + && leftArray[leftOffset + diffIdx] == rightArray[rightOffset + diffIdx]) { + diffIdx++; + } + byte[] minimumMidpointArray = null; + if (diffIdx >= minLength) { + // leftKey's row is prefix of rightKey's. + minimumMidpointArray = new byte[diffIdx + 1]; + System.arraycopy(rightArray, rightOffset, minimumMidpointArray, 0, diffIdx + 1); + } else { + int diffByte = leftArray[leftOffset + diffIdx]; + if ((0xff & diffByte) < 0xff && (diffByte + 1) < (rightArray[rightOffset + diffIdx] & 0xff)) { + minimumMidpointArray = new byte[diffIdx + 1]; + System.arraycopy(leftArray, leftOffset, minimumMidpointArray, 0, diffIdx); + minimumMidpointArray[diffIdx] = (byte) (diffByte + 1); + } else { + minimumMidpointArray = new byte[diffIdx + 1]; + System.arraycopy(rightArray, rightOffset, minimumMidpointArray, 0, diffIdx + 1); + } + } + return minimumMidpointArray; + } + + + /********************* hashCode ************************/ + + /** + * Returns a hash code that is always the same for two Cells having a matching equals(..) result. + */ + public static int hashCode(Cell cell) { + if (cell == null) {// return 0 for empty Cell + return 0; + } + + int hash = calculateHashForKey(cell); + hash = 31 * hash + (int)cell.getMvccVersion(); + return hash; + } + /** + * Returns a hash code that is always the same for two Cells having a matching + * equals(..) result. Note : Ignore mvcc while calculating the hashcode + * + * @param cell + * @return hashCode + */ + public static int hashCodeIgnoreMvcc(Cell cell) { + if (cell == null) {// return 0 for empty Cell + return 0; + } + + int hash = calculateHashForKey(cell); + return hash; + } + + private static int calculateHashForKey(Cell cell) { + //pre-calculate the 3 hashes made of byte ranges + int rowHash = Bytes.hashCode(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength()); + int familyHash = + Bytes.hashCode(cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength()); + int qualifierHash = Bytes.hashCode(cell.getQualifierArray(), cell.getQualifierOffset(), + cell.getQualifierLength()); + + //combine the 6 sub-hashes + int hash = 31 * rowHash + familyHash; + hash = 31 * hash + qualifierHash; + hash = 31 * hash + (int)cell.getTimestamp(); + hash = 31 * hash + cell.getTypeByte(); + return hash; + } } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java index 7de1f54..5d8777f 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java @@ -21,15 +21,11 @@ package org.apache.hadoop.hbase; import static org.apache.hadoop.hbase.util.Bytes.len; -import java.io.DataInput; -import java.io.DataOutput; import java.io.IOException; -import java.io.InputStream; import java.io.OutputStream; import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.Arrays; -import java.util.Comparator; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -41,10 +37,6 @@ import org.apache.hadoop.hbase.io.HeapSize; import org.apache.hadoop.hbase.io.util.StreamUtils; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.ClassSize; -import org.apache.hadoop.io.IOUtils; -import org.apache.hadoop.io.RawComparator; - -import com.google.common.annotations.VisibleForTesting; /** * An HBase Key/Value. This is the fundamental HBase Type. 
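To make the row shortening above concrete, here is a minimal sketch that traces CellUtil#getMidpoint with two hypothetical rows ("abc" and "abx" are invented inputs; the helper getMinimumMidpointArray is private, so the public entry point is used):

    Cell left = CellUtil.createCell(Bytes.toBytes("abc"));
    Cell right = CellUtil.createCell(Bytes.toBytes("abx"));
    Cell mid = CellUtil.getMidpoint(CellComparator.COMPARATOR, left, right);
    // The rows first differ at index 2 ('c' vs 'x'). Since 'c' + 1 = 'd'
    // still sorts below 'x', the minimal separator keeps the shared prefix
    // and bumps the differing byte: mid's row is { 'a', 'b', 'd' }, which
    // sorts after "abc" and before "abx" and is no longer than either input.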
@@ -92,22 +84,6 @@ public class KeyValue implements Cell, HeapSize, Cloneable, SettableSequenceId, public static final byte[] COLUMN_FAMILY_DELIM_ARRAY = new byte[]{COLUMN_FAMILY_DELIMITER}; - /** - * Comparator for plain key/values; i.e. non-catalog table key/values. Works on Key portion - * of KeyValue only. - */ - public static final KVComparator COMPARATOR = new KVComparator(); - /** - * A {@link KVComparator} for hbase:meta catalog table - * {@link KeyValue}s. - */ - public static final KVComparator META_COMPARATOR = new MetaComparator(); - - /** - * Needed for Bloom Filters. - */ - public static final KVComparator RAW_COMPARATOR = new RawBytesComparator(); - /** Size of the key length field in bytes*/ public static final int KEY_LENGTH_SIZE = Bytes.SIZEOF_INT; @@ -1061,7 +1037,7 @@ public class KeyValue implements Cell, HeapSize, Cloneable, SettableSequenceId, if (!(other instanceof Cell)) { return false; } - return CellComparator.equals(this, (Cell)other); + return CellUtil.equals(this, (Cell)other); } /** @@ -1069,7 +1045,7 @@ public class KeyValue implements Cell, HeapSize, Cloneable, SettableSequenceId, */ @Override public int hashCode() { - return CellComparator.hashCodeIgnoreMvcc(this); + return CellUtil.hashCodeIgnoreMvcc(this); } //--------------------------------------------------------------------------- @@ -1637,7 +1613,7 @@ public class KeyValue implements Cell, HeapSize, Cloneable, SettableSequenceId, * @return The parsed column. */ public static byte [][] parseColumn(byte [] c) { - final int index = getDelimiter(c, 0, c.length, COLUMN_FAMILY_DELIMITER); + final int index = Bytes.searchDelimiterIndex(c, 0, c.length, COLUMN_FAMILY_DELIMITER); if (index == -1) { // If no delimiter, return array of size 1 return new byte [][] { c }; @@ -1668,134 +1644,12 @@ public class KeyValue implements Cell, HeapSize, Cloneable, SettableSequenceId, public static byte [] makeColumn(byte [] family, byte [] qualifier) { return Bytes.add(family, COLUMN_FAMILY_DELIM_ARRAY, qualifier); } - - /** - * @param b - * @param delimiter - * @return Index of delimiter having started from start of b - * moving rightward. - */ - public static int getDelimiter(final byte [] b, int offset, final int length, - final int delimiter) { - if (b == null) { - throw new IllegalArgumentException("Passed buffer is null"); - } - int result = -1; - for (int i = offset; i < length + offset; i++) { - if (b[i] == delimiter) { - result = i; - break; - } - } - return result; - } - - /** - * Find index of passed delimiter walking from end of buffer backwards. - * @param b - * @param delimiter - * @return Index of delimiter - */ - public static int getDelimiterInReverse(final byte [] b, final int offset, - final int length, final int delimiter) { - if (b == null) { - throw new IllegalArgumentException("Passed buffer is null"); - } - int result = -1; - for (int i = (offset + length) - 1; i >= offset; i--) { - if (b[i] == delimiter) { - result = i; - break; - } - } - return result; - } - + /** - * A {@link KVComparator} for hbase:meta catalog table - * {@link KeyValue}s. + * This comparator is used in ROW bloom and in the meta index blocks in HFiles */ - public static class MetaComparator extends KVComparator { - /** - * Compare key portion of a {@link KeyValue} for keys in hbase:meta - * table. 
- */ - @Override - public int compare(final Cell left, final Cell right) { - int c = compareRowKey(left, right); - if (c != 0) { - return c; - } - return CellComparator.compareWithoutRow(left, right); - } - - @Override - public int compareOnlyKeyPortion(Cell left, Cell right) { - return compare(left, right); - } - - @Override - public int compareRows(byte [] left, int loffset, int llength, - byte [] right, int roffset, int rlength) { - int leftDelimiter = getDelimiter(left, loffset, llength, - HConstants.DELIMITER); - int rightDelimiter = getDelimiter(right, roffset, rlength, - HConstants.DELIMITER); - // Compare up to the delimiter - int lpart = (leftDelimiter < 0 ? llength :leftDelimiter - loffset); - int rpart = (rightDelimiter < 0 ? rlength :rightDelimiter - roffset); - int result = Bytes.compareTo(left, loffset, lpart, right, roffset, rpart); - if (result != 0) { - return result; - } else { - if (leftDelimiter < 0 && rightDelimiter >= 0) { - return -1; - } else if (rightDelimiter < 0 && leftDelimiter >= 0) { - return 1; - } else if (leftDelimiter < 0 && rightDelimiter < 0) { - return 0; - } - } - // Compare middle bit of the row. - // Move past delimiter - leftDelimiter++; - rightDelimiter++; - int leftFarDelimiter = getDelimiterInReverse(left, leftDelimiter, - llength - (leftDelimiter - loffset), HConstants.DELIMITER); - int rightFarDelimiter = getDelimiterInReverse(right, - rightDelimiter, rlength - (rightDelimiter - roffset), - HConstants.DELIMITER); - // Now compare middlesection of row. - lpart = (leftFarDelimiter < 0 ? llength + loffset: leftFarDelimiter) - leftDelimiter; - rpart = (rightFarDelimiter < 0 ? rlength + roffset: rightFarDelimiter)- rightDelimiter; - result = super.compareRows(left, leftDelimiter, lpart, right, rightDelimiter, rpart); - if (result != 0) { - return result; - } else { - if (leftDelimiter < 0 && rightDelimiter >= 0) { - return -1; - } else if (rightDelimiter < 0 && leftDelimiter >= 0) { - return 1; - } else if (leftDelimiter < 0 && rightDelimiter < 0) { - return 0; - } - } - // Compare last part of row, the rowid. - leftFarDelimiter++; - rightFarDelimiter++; - result = Bytes.compareTo(left, leftFarDelimiter, llength - (leftFarDelimiter - loffset), - right, rightFarDelimiter, rlength - (rightFarDelimiter - roffset)); - return result; - } - - /** - * Don't do any fancy Block Index splitting tricks. - */ - @Override - public byte[] getShortMidpointKey(final byte[] leftKey, final byte[] rightKey) { - return Arrays.copyOf(rightKey, rightKey.length); - } - + //TODO : Get rid of this from the HFiles also + public static class RawBytesComparator extends CellComparator { /** * The HFileV2 file format's trailer contains this class name. We reinterpret this and * instantiate the appropriate comparator. @@ -1804,714 +1658,29 @@ public class KeyValue implements Cell, HeapSize, Cloneable, SettableSequenceId, */ @Override public String getLegacyKeyComparatorName() { - return "org.apache.hadoop.hbase.KeyValue$MetaKeyComparator"; - } - - @Override - protected Object clone() throws CloneNotSupportedException { - return new MetaComparator(); - } - - /** - * Override the row key comparison to parse and compare the meta row key parts. 
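As a sketch of why the delimiter-aware parse matters, take two hypothetical hbase:meta row keys of the form table,startkey,regionid, where a start key may itself contain the comma delimiter (META_COMPARATOR is the instance this patch adds):

    byte[] a = Bytes.toBytes("t1,row,3,99"); // start key "row,3", region id "99"
    byte[] b = Bytes.toBytes("t1,row,88");   // start key "row",   region id "88"
    int cmp = CellComparator.META_COMPARATOR.compareRows(a, 0, a.length, b, 0, b.length);
    // The table parts tie, so the start keys decide: "row,3" sorts after
    // "row", hence cmp > 0, even though a plain Bytes.compareTo(a, b) is
    // negative ('3' < '8' at the first differing byte).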
- */ - @Override - protected int compareRowKey(final Cell l, final Cell r) { - byte[] left = l.getRowArray(); - int loffset = l.getRowOffset(); - int llength = l.getRowLength(); - byte[] right = r.getRowArray(); - int roffset = r.getRowOffset(); - int rlength = r.getRowLength(); - return compareRows(left, loffset, llength, right, roffset, rlength); - } - } - - /** - * Compare KeyValues. When we compare KeyValues, we only compare the Key - * portion. This means two KeyValues with same Key but different Values are - * considered the same as far as this Comparator is concerned. - */ - public static class KVComparator implements RawComparator, SamePrefixComparator { - - /** - * The HFileV2 file format's trailer contains this class name. We reinterpret this and - * instantiate the appropriate comparator. - * TODO: With V3 consider removing this. - * @return legacy class name for FileFileTrailer#comparatorClassName - */ - public String getLegacyKeyComparatorName() { - return "org.apache.hadoop.hbase.KeyValue$KeyComparator"; - } - - @Override // RawComparator - public int compare(byte[] l, int loff, int llen, byte[] r, int roff, int rlen) { - return compareFlatKey(l,loff,llen, r,roff,rlen); - } - - - /** - * Compares the only the user specified portion of a Key. This is overridden by MetaComparator. - * @param left - * @param right - * @return 0 if equal, <0 if left smaller, >0 if right smaller - */ - protected int compareRowKey(final Cell left, final Cell right) { - return CellComparator.compareRows(left, right); - } - - /** - * Compares left to right assuming that left,loffset,llength and right,roffset,rlength are - * full KVs laid out in a flat byte[]s. - * @param left - * @param loffset - * @param llength - * @param right - * @param roffset - * @param rlength - * @return 0 if equal, <0 if left smaller, >0 if right smaller - */ - public int compareFlatKey(byte[] left, int loffset, int llength, - byte[] right, int roffset, int rlength) { - // Compare row - short lrowlength = Bytes.toShort(left, loffset); - short rrowlength = Bytes.toShort(right, roffset); - int compare = compareRows(left, loffset + Bytes.SIZEOF_SHORT, - lrowlength, right, roffset + Bytes.SIZEOF_SHORT, rrowlength); - if (compare != 0) { - return compare; - } - - // Compare the rest of the two KVs without making any assumptions about - // the common prefix. This function will not compare rows anyway, so we - // don't need to tell it that the common prefix includes the row. - return compareWithoutRow(0, left, loffset, llength, right, roffset, - rlength, rrowlength); - } - - public int compareFlatKey(byte[] left, byte[] right) { - return compareFlatKey(left, 0, left.length, right, 0, right.length); - } - - // compare a key against row/fam/qual/ts/type - public int compareKey(Cell cell, - byte[] row, int roff, int rlen, - byte[] fam, int foff, int flen, - byte[] col, int coff, int clen, - long ts, byte type) { - - int compare = compareRows( - cell.getRowArray(), cell.getRowOffset(), cell.getRowLength(), - row, roff, rlen); - if (compare != 0) { - return compare; - } - // If the column is not specified, the "minimum" key type appears the - // latest in the sorted order, regardless of the timestamp. This is used - // for specifying the last key/value in a given row, because there is no - // "lexicographically last column" (it would be infinitely long). The - // "maximum" key type does not need this behavior. 
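To make that rule concrete, a small sketch with invented keys (using public KeyValue constructors):

    KeyValue bound = new KeyValue(Bytes.toBytes("r"), null, null,
        HConstants.LATEST_TIMESTAMP, KeyValue.Type.Minimum);
    KeyValue cell = new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("cf"),
        Bytes.toBytes("q"), 5L, KeyValue.Type.Put);
    // CellComparator.COMPARATOR.compare(cell, bound) < 0: with no column and
    // type Minimum, bound sorts after every real column of row "r", so it
    // works as an exclusive upper bound for the row.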
- if (cell.getFamilyLength() + cell.getQualifierLength() == 0 - && cell.getTypeByte() == Type.Minimum.getCode()) { - // left is "bigger", i.e. it appears later in the sorted order - return 1; - } - if (flen+clen == 0 && type == Type.Minimum.getCode()) { - return -1; - } - - compare = compareFamilies( - cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength(), - fam, foff, flen); - if (compare != 0) { - return compare; - } - compare = compareColumns( - cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength(), - col, coff, clen); - if (compare != 0) { - return compare; - } - // Next compare timestamps. - compare = compareTimestamps(cell.getTimestamp(), ts); - if (compare != 0) { - return compare; - } - - // Compare types. Let the delete types sort ahead of puts; i.e. types - // of higher numbers sort before those of lesser numbers. Maximum (255) - // appears ahead of everything, and minimum (0) appears after - // everything. - return (0xff & type) - (0xff & cell.getTypeByte()); - } - - public int compareOnlyKeyPortion(Cell left, Cell right) { - return CellComparator.compare(left, right, true); - } - - /** - * Compares the Key of a cell -- with fields being more significant in this order: - * rowkey, colfam/qual, timestamp, type, mvcc - */ - @Override - public int compare(final Cell left, final Cell right) { - int compare = CellComparator.compare(left, right, false); - return compare; - } - - public int compareTimestamps(final Cell left, final Cell right) { - return CellComparator.compareTimestamps(left, right); - } - - /** - * @param left - * @param right - * @return Result comparing rows. - */ - public int compareRows(final Cell left, final Cell right) { - return compareRows(left.getRowArray(),left.getRowOffset(), left.getRowLength(), - right.getRowArray(), right.getRowOffset(), right.getRowLength()); - } - - /** - * Get the b[],o,l for left and right rowkey portions and compare. - * @param left - * @param loffset - * @param llength - * @param right - * @param roffset - * @param rlength - * @return 0 if equal, <0 if left smaller, >0 if right smaller - */ - public int compareRows(byte [] left, int loffset, int llength, - byte [] right, int roffset, int rlength) { - return Bytes.compareTo(left, loffset, llength, right, roffset, rlength); - } - - int compareColumns(final Cell left, final short lrowlength, final Cell right, - final short rrowlength) { - return CellComparator.compareColumns(left, right); - } - - protected int compareColumns( - byte [] left, int loffset, int llength, final int lfamilylength, - byte [] right, int roffset, int rlength, final int rfamilylength) { - // Compare family portion first. - int diff = Bytes.compareTo(left, loffset, lfamilylength, - right, roffset, rfamilylength); - if (diff != 0) { - return diff; - } - // Compare qualifier portion - return Bytes.compareTo(left, loffset + lfamilylength, - llength - lfamilylength, - right, roffset + rfamilylength, rlength - rfamilylength); - } - - static int compareTimestamps(final long ltimestamp, final long rtimestamp) { - // The below older timestamps sorting ahead of newer timestamps looks - // wrong but it is intentional. This way, newer timestamps are first - // found when we iterate over a memstore and newer versions are the - // first we trip over when reading from a store file. 
- if (ltimestamp < rtimestamp) { - return 1; - } else if (ltimestamp > rtimestamp) { - return -1; - } - return 0; - } - - /** - * Overridden - * @param commonPrefix - * @param left - * @param loffset - * @param llength - * @param right - * @param roffset - * @param rlength - * @return 0 if equal, <0 if left smaller, >0 if right smaller - */ - @Override // SamePrefixComparator - public int compareIgnoringPrefix(int commonPrefix, byte[] left, - int loffset, int llength, byte[] right, int roffset, int rlength) { - // Compare row - short lrowlength = Bytes.toShort(left, loffset); - short rrowlength; - - int comparisonResult = 0; - if (commonPrefix < ROW_LENGTH_SIZE) { - // almost nothing in common - rrowlength = Bytes.toShort(right, roffset); - comparisonResult = compareRows(left, loffset + ROW_LENGTH_SIZE, - lrowlength, right, roffset + ROW_LENGTH_SIZE, rrowlength); - } else { // the row length is the same - rrowlength = lrowlength; - if (commonPrefix < ROW_LENGTH_SIZE + rrowlength) { - // The rows are not the same. Exclude the common prefix and compare - // the rest of the two rows. - int common = commonPrefix - ROW_LENGTH_SIZE; - comparisonResult = compareRows( - left, loffset + common + ROW_LENGTH_SIZE, lrowlength - common, - right, roffset + common + ROW_LENGTH_SIZE, rrowlength - common); - } - } - if (comparisonResult != 0) { - return comparisonResult; - } - - assert lrowlength == rrowlength; - return compareWithoutRow(commonPrefix, left, loffset, llength, right, - roffset, rlength, lrowlength); - } - - /** - * Compare columnFamily, qualifier, timestamp, and key type (everything - * except the row). This method is used both in the normal comparator and - * the "same-prefix" comparator. Note that we are assuming that row portions - * of both KVs have already been parsed and found identical, and we don't - * validate that assumption here. - * @param commonPrefix - * the length of the common prefix of the two key-values being - * compared, including row length and row - */ - private int compareWithoutRow(int commonPrefix, byte[] left, int loffset, - int llength, byte[] right, int roffset, int rlength, short rowlength) { - /*** - * KeyValue Format and commonLength: - * |_keyLen_|_valLen_|_rowLen_|_rowKey_|_famiLen_|_fami_|_Quali_|.... - * ------------------|-------commonLength--------|-------------- - */ - int commonLength = ROW_LENGTH_SIZE + FAMILY_LENGTH_SIZE + rowlength; - - // commonLength + TIMESTAMP_TYPE_SIZE - int commonLengthWithTSAndType = TIMESTAMP_TYPE_SIZE + commonLength; - // ColumnFamily + Qualifier length. - int lcolumnlength = llength - commonLengthWithTSAndType; - int rcolumnlength = rlength - commonLengthWithTSAndType; - - byte ltype = left[loffset + (llength - 1)]; - byte rtype = right[roffset + (rlength - 1)]; - - // If the column is not specified, the "minimum" key type appears the - // latest in the sorted order, regardless of the timestamp. This is used - // for specifying the last key/value in a given row, because there is no - // "lexicographically last column" (it would be infinitely long). The - // "maximum" key type does not need this behavior. - if (lcolumnlength == 0 && ltype == Type.Minimum.getCode()) { - // left is "bigger", i.e. it appears later in the sorted order - return 1; - } - if (rcolumnlength == 0 && rtype == Type.Minimum.getCode()) { - return -1; - } - - int lfamilyoffset = commonLength + loffset; - int rfamilyoffset = commonLength + roffset; - - // Column family length. 
- int lfamilylength = left[lfamilyoffset - 1]; - int rfamilylength = right[rfamilyoffset - 1]; - // If left family size is not equal to right family size, we need not - // compare the qualifiers. - boolean sameFamilySize = (lfamilylength == rfamilylength); - int common = 0; - if (commonPrefix > 0) { - common = Math.max(0, commonPrefix - commonLength); - if (!sameFamilySize) { - // Common should not be larger than Math.min(lfamilylength, - // rfamilylength). - common = Math.min(common, Math.min(lfamilylength, rfamilylength)); - } else { - common = Math.min(common, Math.min(lcolumnlength, rcolumnlength)); - } - } - if (!sameFamilySize) { - // comparing column family is enough. - return Bytes.compareTo(left, lfamilyoffset + common, lfamilylength - - common, right, rfamilyoffset + common, rfamilylength - common); - } - // Compare family & qualifier together. - final int comparison = Bytes.compareTo(left, lfamilyoffset + common, - lcolumnlength - common, right, rfamilyoffset + common, - rcolumnlength - common); - if (comparison != 0) { - return comparison; - } - - //// - // Next compare timestamps. - long ltimestamp = Bytes.toLong(left, - loffset + (llength - TIMESTAMP_TYPE_SIZE)); - long rtimestamp = Bytes.toLong(right, - roffset + (rlength - TIMESTAMP_TYPE_SIZE)); - int compare = compareTimestamps(ltimestamp, rtimestamp); - if (compare != 0) { - return compare; - } - - // Compare types. Let the delete types sort ahead of puts; i.e. types - // of higher numbers sort before those of lesser numbers. Maximum (255) - // appears ahead of everything, and minimum (0) appears after - // everything. - return (0xff & rtype) - (0xff & ltype); - } - - protected int compareFamilies(final byte[] left, final int loffset, final int lfamilylength, - final byte[] right, final int roffset, final int rfamilylength) { - int diff = Bytes.compareTo(left, loffset, lfamilylength, right, roffset, rfamilylength); - return diff; - } - - protected int compareColumns(final byte[] left, final int loffset, final int lquallength, - final byte[] right, final int roffset, final int rquallength) { - int diff = Bytes.compareTo(left, loffset, lquallength, right, roffset, rquallength); - return diff; - } - /** - * Compares the row and column of two keyvalues for equality - * @param left - * @param right - * @return True if same row and column. - */ - public boolean matchingRowColumn(final Cell left, - final Cell right) { - short lrowlength = left.getRowLength(); - short rrowlength = right.getRowLength(); - - // TsOffset = end of column data. 
just comparing Row+CF length of each - if ((left.getRowLength() + left.getFamilyLength() + left.getQualifierLength()) != (right - .getRowLength() + right.getFamilyLength() + right.getQualifierLength())) { - return false; - } - - if (!matchingRows(left, lrowlength, right, rrowlength)) { - return false; - } - - int lfoffset = left.getFamilyOffset(); - int rfoffset = right.getFamilyOffset(); - int lclength = left.getQualifierLength(); - int rclength = right.getQualifierLength(); - int lfamilylength = left.getFamilyLength(); - int rfamilylength = right.getFamilyLength(); - int diff = compareFamilies(left.getFamilyArray(), lfoffset, lfamilylength, - right.getFamilyArray(), rfoffset, rfamilylength); - if (diff != 0) { - return false; - } else { - diff = compareColumns(left.getQualifierArray(), left.getQualifierOffset(), lclength, - right.getQualifierArray(), right.getQualifierOffset(), rclength); - return diff == 0; - } - } - - /** - * Compares the row of two keyvalues for equality - * @param left - * @param right - * @return True if rows match. - */ - public boolean matchingRows(final Cell left, final Cell right) { - short lrowlength = left.getRowLength(); - short rrowlength = right.getRowLength(); - return matchingRows(left, lrowlength, right, rrowlength); - } - - /** - * @param left - * @param lrowlength - * @param right - * @param rrowlength - * @return True if rows match. - */ - private boolean matchingRows(final Cell left, final short lrowlength, - final Cell right, final short rrowlength) { - return lrowlength == rrowlength && - matchingRows(left.getRowArray(), left.getRowOffset(), lrowlength, - right.getRowArray(), right.getRowOffset(), rrowlength); - } - - /** - * Compare rows. Just calls Bytes.equals, but it's good to have this encapsulated. - * @param left Left row array. - * @param loffset Left row offset. - * @param llength Left row length. - * @param right Right row array. - * @param roffset Right row offset. - * @param rlength Right row length. - * @return Whether rows are the same row. - */ - public boolean matchingRows(final byte [] left, final int loffset, final int llength, - final byte [] right, final int roffset, final int rlength) { - return Bytes.equals(left, loffset, llength, right, roffset, rlength); - } - - public byte[] calcIndexKey(byte[] lastKeyOfPreviousBlock, byte[] firstKeyInBlock) { - byte[] fakeKey = getShortMidpointKey(lastKeyOfPreviousBlock, firstKeyInBlock); - if (compareFlatKey(fakeKey, firstKeyInBlock) > 0) { - LOG.error("Unexpected getShortMidpointKey result, fakeKey:" - + Bytes.toStringBinary(fakeKey) + ", firstKeyInBlock:" - + Bytes.toStringBinary(firstKeyInBlock)); - return firstKeyInBlock; - } - if (lastKeyOfPreviousBlock != null && compareFlatKey(lastKeyOfPreviousBlock, fakeKey) >= 0) { - LOG.error("Unexpected getShortMidpointKey result, lastKeyOfPreviousBlock:" + - Bytes.toStringBinary(lastKeyOfPreviousBlock) + ", fakeKey:" + - Bytes.toStringBinary(fakeKey)); - return firstKeyInBlock; - } - return fakeKey; - } - - /** - * This is a HFile block index key optimization. 
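For a sense of what this optimization produced, a sketch with hypothetical rows, tracing the implementation below:

    // leftKey row  = "abc"   (last key of previous block)
    // rightKey row = "abx"   (first key of next block)
    // fake key     = row "abd", LATEST_TIMESTAMP, Type.Maximum
    // => sorts after all "abc" cells and before any "abx" cell, while the
    //    block index stores 3 row bytes instead of the full first key.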
- * @param leftKey - * @param rightKey - * @return 0 if equal, <0 if left smaller, >0 if right smaller - * @deprecated Since 0.99.2; Use - * {@link CellComparator#getMidpoint(KeyValue.KVComparator, Cell, Cell) instead} - */ - @Deprecated - public byte[] getShortMidpointKey(final byte[] leftKey, final byte[] rightKey) { - if (rightKey == null) { - throw new IllegalArgumentException("rightKey can not be null"); - } - if (leftKey == null) { - return Arrays.copyOf(rightKey, rightKey.length); - } - if (compareFlatKey(leftKey, rightKey) >= 0) { - throw new IllegalArgumentException("Unexpected input, leftKey:" + Bytes.toString(leftKey) - + ", rightKey:" + Bytes.toString(rightKey)); - } - - short leftRowLength = Bytes.toShort(leftKey, 0); - short rightRowLength = Bytes.toShort(rightKey, 0); - int leftCommonLength = ROW_LENGTH_SIZE + FAMILY_LENGTH_SIZE + leftRowLength; - int rightCommonLength = ROW_LENGTH_SIZE + FAMILY_LENGTH_SIZE + rightRowLength; - int leftCommonLengthWithTSAndType = TIMESTAMP_TYPE_SIZE + leftCommonLength; - int rightCommonLengthWithTSAndType = TIMESTAMP_TYPE_SIZE + rightCommonLength; - int leftColumnLength = leftKey.length - leftCommonLengthWithTSAndType; - int rightColumnLength = rightKey.length - rightCommonLengthWithTSAndType; - // rows are equal - if (leftRowLength == rightRowLength && compareRows(leftKey, ROW_LENGTH_SIZE, leftRowLength, - rightKey, ROW_LENGTH_SIZE, rightRowLength) == 0) { - // Compare family & qualifier together. - int comparison = Bytes.compareTo(leftKey, leftCommonLength, leftColumnLength, rightKey, - rightCommonLength, rightColumnLength); - // same with "row + family + qualifier", return rightKey directly - if (comparison == 0) { - return Arrays.copyOf(rightKey, rightKey.length); - } - // "family + qualifier" are different, generate a faked key per rightKey - byte[] newKey = Arrays.copyOf(rightKey, rightKey.length); - Bytes.putLong(newKey, rightKey.length - TIMESTAMP_TYPE_SIZE, HConstants.LATEST_TIMESTAMP); - Bytes.putByte(newKey, rightKey.length - TYPE_SIZE, Type.Maximum.getCode()); - return newKey; - } - // rows are different - short minLength = leftRowLength < rightRowLength ? leftRowLength : rightRowLength; - short diffIdx = 0; - while (diffIdx < minLength - && leftKey[ROW_LENGTH_SIZE + diffIdx] == rightKey[ROW_LENGTH_SIZE + diffIdx]) { - diffIdx++; - } - byte[] newRowKey = null; - if (diffIdx >= minLength) { - // leftKey's row is prefix of rightKey's. - newRowKey = new byte[diffIdx + 1]; - System.arraycopy(rightKey, ROW_LENGTH_SIZE, newRowKey, 0, diffIdx + 1); - } else { - int diffByte = leftKey[ROW_LENGTH_SIZE + diffIdx]; - if ((0xff & diffByte) < 0xff && (diffByte + 1) < - (rightKey[ROW_LENGTH_SIZE + diffIdx] & 0xff)) { - newRowKey = new byte[diffIdx + 1]; - System.arraycopy(leftKey, ROW_LENGTH_SIZE, newRowKey, 0, diffIdx); - newRowKey[diffIdx] = (byte) (diffByte + 1); - } else { - newRowKey = new byte[diffIdx + 1]; - System.arraycopy(rightKey, ROW_LENGTH_SIZE, newRowKey, 0, diffIdx + 1); - } - } - return new KeyValue(newRowKey, null, null, HConstants.LATEST_TIMESTAMP, - Type.Maximum).getKey(); - } - - @Override - protected Object clone() throws CloneNotSupportedException { - super.clone(); - return new KVComparator(); - } - - } - - /** - * @param b - * @return A KeyValue made of a byte array that holds the key-only part. - * Needed to convert hfile index members to KeyValues. 
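A sketch of the backing layout these factories build (invented key bytes; the same helpers reappear in KeyValueUtil later in this patch):

    KeyValue full = new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"),
        Bytes.toBytes("q"), Bytes.toBytes("v"));
    KeyValue keyOnly = KeyValueUtil.createKeyValueFromKey(full.getKey());
    // The new backing array is [ keyLen:int ][ valLen:int = 0 ][ key bytes ],
    // i.e. ROW_OFFSET extra header bytes, so keyOnly exposes row, family,
    // qualifier, timestamp and type but carries a zero-length value.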
- */ - public static KeyValue createKeyValueFromKey(final byte [] b) { - return createKeyValueFromKey(b, 0, b.length); - } - - /** - * @param bb - * @return A KeyValue made of a byte buffer that holds the key-only part. - * Needed to convert hfile index members to KeyValues. - */ - public static KeyValue createKeyValueFromKey(final ByteBuffer bb) { - return createKeyValueFromKey(bb.array(), bb.arrayOffset(), bb.limit()); - } - - /** - * @param b - * @param o - * @param l - * @return A KeyValue made of a byte array that holds the key-only part. - * Needed to convert hfile index members to KeyValues. - */ - public static KeyValue createKeyValueFromKey(final byte [] b, final int o, - final int l) { - byte [] newb = new byte[l + ROW_OFFSET]; - System.arraycopy(b, o, newb, ROW_OFFSET, l); - Bytes.putInt(newb, 0, l); - Bytes.putInt(newb, Bytes.SIZEOF_INT, 0); - return new KeyValue(newb); - } - - /** - * @param in Where to read bytes from. Creates a byte array to hold the KeyValue - * backing bytes copied from the steam. - * @return KeyValue created by deserializing from in OR if we find a length - * of zero, we will return null which can be useful marking a stream as done. - * @throws IOException - */ - public static KeyValue create(final DataInput in) throws IOException { - return create(in.readInt(), in); - } - - /** - * Create a KeyValue reading length from in - * @param length - * @param in - * @return Created KeyValue OR if we find a length of zero, we will return null which - * can be useful marking a stream as done. - * @throws IOException - */ - public static KeyValue create(int length, final DataInput in) throws IOException { - - if (length <= 0) { - if (length == 0) return null; - throw new IOException("Failed read " + length + " bytes, stream corrupt?"); - } - - // This is how the old Writables.readFrom used to deserialize. Didn't even vint. - byte [] bytes = new byte[length]; - in.readFully(bytes); - return new KeyValue(bytes, 0, length); - } - - /** - * Create a new KeyValue by copying existing cell and adding new tags - * @param c - * @param newTags - * @return a new KeyValue instance with new tags - */ - public static KeyValue cloneAndAddTags(Cell c, List newTags) { - List existingTags = null; - if(c.getTagsLength() > 0) { - existingTags = Tag.asList(c.getTagsArray(), c.getTagsOffset(), c.getTagsLength()); - existingTags.addAll(newTags); - } else { - existingTags = newTags; - } - return new KeyValue(c.getRowArray(), c.getRowOffset(), (int)c.getRowLength(), - c.getFamilyArray(), c.getFamilyOffset(), (int)c.getFamilyLength(), - c.getQualifierArray(), c.getQualifierOffset(), (int) c.getQualifierLength(), - c.getTimestamp(), Type.codeToType(c.getTypeByte()), c.getValueArray(), c.getValueOffset(), - c.getValueLength(), existingTags); - } - - /** - * Create a KeyValue reading from the raw InputStream. - * Named iscreate so doesn't clash with {@link #create(DataInput)} - * @param in - * @return Created KeyValue OR if we find a length of zero, we will return null which - * can be useful marking a stream as done. 
- * @throws IOException - */ - public static KeyValue iscreate(final InputStream in) throws IOException { - byte [] intBytes = new byte[Bytes.SIZEOF_INT]; - int bytesRead = 0; - while (bytesRead < intBytes.length) { - int n = in.read(intBytes, bytesRead, intBytes.length - bytesRead); - if (n < 0) { - if (bytesRead == 0) return null; // EOF at start is ok - throw new IOException("Failed read of int, read " + bytesRead + " bytes"); - } - bytesRead += n; + return "org.apache.hadoop.hbase.util.Bytes$ByteArrayComparator"; } - // TODO: perhaps some sanity check is needed here. - byte [] bytes = new byte[Bytes.toInt(intBytes)]; - IOUtils.readFully(in, bytes, 0, bytes.length); - return new KeyValue(bytes, 0, bytes.length); } /** - * Write out a KeyValue in the manner in which we used to when KeyValue was a Writable. - * @param kv - * @param out - * @return Length written on stream - * @throws IOException - * @see #create(DataInput) for the inverse function - */ - public static long write(final KeyValue kv, final DataOutput out) throws IOException { - // This is how the old Writables write used to serialize KVs. Need to figure way to make it - // work for all implementations. - int length = kv.getLength(); - out.writeInt(length); - out.write(kv.getBuffer(), kv.getOffset(), length); - return length + Bytes.SIZEOF_INT; - } - - /** - * Write out a KeyValue in the manner in which we used to when KeyValue was a Writable but do - * not require a {@link DataOutput}, just take plain {@link OutputStream} - * Named oswrite so does not clash with {@link #write(KeyValue, DataOutput)} - * @param kv - * @param out - * @return Length written on stream - * @throws IOException - * @see #create(DataInput) for the inverse function - * @see #write(KeyValue, DataOutput) - * @deprecated use {@link #oswrite(KeyValue, OutputStream, boolean)} instead - */ - @Deprecated - public static long oswrite(final KeyValue kv, final OutputStream out) - throws IOException { - int length = kv.getLength(); - // This does same as DataOuput#writeInt (big-endian, etc.) - out.write(Bytes.toBytes(length)); - out.write(kv.getBuffer(), kv.getOffset(), length); - return length + Bytes.SIZEOF_INT; - } - - /** - * Write out a KeyValue in the manner in which we used to when KeyValue was a Writable but do - * not require a {@link DataOutput}, just take plain {@link OutputStream} - * Named oswrite so does not clash with {@link #write(KeyValue, DataOutput)} + * Write out a KeyValue in the manner in which we used to when KeyValue was a + * Writable but do not require a DataOutput, just take plain + * {@link OutputStream} Named oswrite so does not clash with + * {@link KeyValueUtil#write(KeyValue, DataOutput)} + * * @param kv * @param out * @param withTags * @return Length written on stream * @throws IOException - * @see #create(DataInput) for the inverse function - * @see #write(KeyValue, DataOutput) + * @see KeyValueUtil#create(DataInput) for the inverse function + * @see KeyValueUtil#write(KeyValue, DataOutput) * @see KeyValueUtil#oswrite(Cell, OutputStream, boolean) */ public static long oswrite(final KeyValue kv, final OutputStream out, final boolean withTags) throws IOException { - // In KeyValueUtil#oswrite we do a Cell serialization as KeyValue. Any changes doing here, pls + // In KeyValueUtil#oswrite we do a Cell serialization as KeyValue. Any + // changes doing here, pls // check KeyValueUtil#oswrite also and do necessary changes. 
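+    // For a hypothetical kv the stream receives a 4-byte length followed by
+    // the KeyValue's backing bytes; when withTags is false the tags section
+    // (2-byte tags length plus tag bytes) is omitted and the written length
+    // shrinks to match.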
int length = kv.getLength(); if (!withTags) { @@ -2524,102 +1693,6 @@ public class KeyValue implements Cell, HeapSize, Cloneable, SettableSequenceId, } /** - * Comparator that compares row component only of a KeyValue. - */ - public static class RowOnlyComparator implements Comparator { - final KVComparator comparator; - - public RowOnlyComparator(final KVComparator c) { - this.comparator = c; - } - - @Override - public int compare(KeyValue left, KeyValue right) { - return comparator.compareRows(left, right); - } - } - - - /** - * Avoids redundant comparisons for better performance. - * - * TODO get rid of this wart - */ - public interface SamePrefixComparator { - /** - * Compare two keys assuming that the first n bytes are the same. - * @param commonPrefix How many bytes are the same. - */ - int compareIgnoringPrefix(int commonPrefix, byte[] left, int loffset, int llength, - byte[] right, int roffset, int rlength - ); - } - - /** - * This is a TEST only Comparator used in TestSeekTo and TestReseekTo. - */ - public static class RawBytesComparator extends KVComparator { - /** - * The HFileV2 file format's trailer contains this class name. We reinterpret this and - * instantiate the appropriate comparator. - * TODO: With V3 consider removing this. - * @return legacy class name for FileFileTrailer#comparatorClassName - */ - @Override - public String getLegacyKeyComparatorName() { - return "org.apache.hadoop.hbase.util.Bytes$ByteArrayComparator"; - } - - /** - * @deprecated Since 0.99.2. - */ - @Override - @Deprecated - public int compareFlatKey(byte[] left, int loffset, int llength, byte[] right, - int roffset, int rlength) { - return Bytes.BYTES_RAWCOMPARATOR.compare(left, loffset, llength, right, roffset, rlength); - } - - @Override - public int compare(Cell left, Cell right) { - return compareOnlyKeyPortion(left, right); - } - - @Override - @VisibleForTesting - public int compareOnlyKeyPortion(Cell left, Cell right) { - int c = Bytes.BYTES_RAWCOMPARATOR.compare(left.getRowArray(), left.getRowOffset(), - left.getRowLength(), right.getRowArray(), right.getRowOffset(), right.getRowLength()); - if (c != 0) { - return c; - } - c = Bytes.BYTES_RAWCOMPARATOR.compare(left.getFamilyArray(), left.getFamilyOffset(), - left.getFamilyLength(), right.getFamilyArray(), right.getFamilyOffset(), - right.getFamilyLength()); - if (c != 0) { - return c; - } - c = Bytes.BYTES_RAWCOMPARATOR.compare(left.getQualifierArray(), left.getQualifierOffset(), - left.getQualifierLength(), right.getQualifierArray(), right.getQualifierOffset(), - right.getQualifierLength()); - if (c != 0) { - return c; - } - c = compareTimestamps(left.getTimestamp(), right.getTimestamp()); - if (c != 0) { - return c; - } - return (0xff & left.getTypeByte()) - (0xff & right.getTypeByte()); - } - - @Override - public byte[] calcIndexKey(byte[] lastKeyOfPreviousBlock, byte[] firstKeyInBlock) { - return firstKeyInBlock; - } - - } - - /** * HeapSize implementation * * We do not count the bytes in the rowCache because it should be empty for a KeyValue in the diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueTestUtil.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueTestUtil.java index f0c8b48..50a409d 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueTestUtil.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueTestUtil.java @@ -85,7 +85,7 @@ public class KeyValueTestUtil { for (Cell kv1 : kvCollection1) { boolean found = false; for (Cell kv2 : kvCollection2) { - if 
(CellComparator.equalsIgnoreMvccVersion(kv1, kv2)) found = true; + if (CellUtil.equalsIgnoreMvccVersion(kv1, kv2)) found = true; } if (!found) return false; } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueUtil.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueUtil.java index 7cbfdd6..4ef14fc 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueUtil.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueUtil.java @@ -18,7 +18,10 @@ package org.apache.hadoop.hbase; +import java.io.DataInput; +import java.io.DataOutput; import java.io.IOException; +import java.io.InputStream; import java.io.OutputStream; import java.nio.ByteBuffer; import java.util.ArrayList; @@ -31,6 +34,7 @@ import org.apache.hadoop.hbase.util.ByteBufferUtils; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.IterableUtils; import org.apache.hadoop.hbase.util.SimpleMutableByteRange; +import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.io.WritableUtils; import com.google.common.base.Function; @@ -541,6 +545,122 @@ public class KeyValueUtil { }); return new ArrayList(lazyList); } + /** + * Write out a KeyValue in the manner in which we used to when KeyValue was a + * Writable. + * + * @param kv + * @param out + * @return Length written on stream + * @throws IOException + * @see #create(DataInput) for the inverse function + */ + public static long write(final KeyValue kv, final DataOutput out) throws IOException { + // This is how the old Writables write used to serialize KVs. Need to figure + // way to make it + // work for all implementations. + int length = kv.getLength(); + out.writeInt(length); + out.write(kv.getBuffer(), kv.getOffset(), length); + return length + Bytes.SIZEOF_INT; + } + + /** + * Create a KeyValue reading from the raw InputStream. Named + * iscreate so doesn't clash with {@link #create(DataInput)} + * + * @param in + * @return Created KeyValue OR if we find a length of zero, we will return + * null which can be useful marking a stream as done. + * @throws IOException + */ + public static KeyValue iscreate(final InputStream in) throws IOException { + byte[] intBytes = new byte[Bytes.SIZEOF_INT]; + int bytesRead = 0; + while (bytesRead < intBytes.length) { + int n = in.read(intBytes, bytesRead, intBytes.length - bytesRead); + if (n < 0) { + if (bytesRead == 0) + return null; // EOF at start is ok + throw new IOException("Failed read of int, read " + bytesRead + " bytes"); + } + bytesRead += n; + } + // TODO: perhaps some sanity check is needed here. + byte[] bytes = new byte[Bytes.toInt(intBytes)]; + IOUtils.readFully(in, bytes, 0, bytes.length); + return new KeyValue(bytes, 0, bytes.length); + } + + /** + * @param b + * @return A KeyValue made of a byte array that holds the key-only part. + * Needed to convert hfile index members to KeyValues. + */ + public static KeyValue createKeyValueFromKey(final byte[] b) { + return createKeyValueFromKey(b, 0, b.length); + } + + /** + * @param bb + * @return A KeyValue made of a byte buffer that holds the key-only part. + * Needed to convert hfile index members to KeyValues. + */ + public static KeyValue createKeyValueFromKey(final ByteBuffer bb) { + return createKeyValueFromKey(bb.array(), bb.arrayOffset(), bb.limit()); + } + + /** + * @param b + * @param o + * @param l + * @return A KeyValue made of a byte array that holds the key-only part. + * Needed to convert hfile index members to KeyValues. 
+   */
+  public static KeyValue createKeyValueFromKey(final byte[] b, final int o, final int l) {
+    byte[] newb = new byte[l + KeyValue.ROW_OFFSET];
+    System.arraycopy(b, o, newb, KeyValue.ROW_OFFSET, l);
+    Bytes.putInt(newb, 0, l);
+    Bytes.putInt(newb, Bytes.SIZEOF_INT, 0);
+    return new KeyValue(newb);
+  }
+
+  /**
+   * @param in
+   *          Where to read bytes from. Creates a byte array to hold the
+   *          KeyValue backing bytes copied from the stream.
+   * @return KeyValue created by deserializing from in OR if we
+   *         find a length of zero, we will return null which can be useful
+   *         marking a stream as done.
+   * @throws IOException
+   */
+  public static KeyValue create(final DataInput in) throws IOException {
+    return create(in.readInt(), in);
+  }
+
+  /**
+   * Create a KeyValue reading length from in
+   *
+   * @param length
+   * @param in
+   * @return Created KeyValue OR if we find a length of zero, we will return
+   *         null which can be useful marking a stream as done.
+   * @throws IOException
+   */
+  public static KeyValue create(int length, final DataInput in) throws IOException {
+
+    if (length <= 0) {
+      if (length == 0)
+        return null;
+      throw new IOException("Failed read " + length + " bytes, stream corrupt?");
+    }
+
+    // This is how the old Writables.readFrom used to deserialize. Didn't even
+    // vint.
+    byte[] bytes = new byte[length];
+    in.readFully(bytes);
+    return new KeyValue(bytes, 0, length);
+  }

   public static void oswrite(final Cell cell, final OutputStream out, final boolean withTags)
       throws IOException {
@@ -576,7 +696,8 @@ public class KeyValueUtil {
     // write tags if we have to
     if (withTags && tlen > 0) {
       // 2 bytes tags length followed by tags bytes
-      // tags length is serialized with 2 bytes only(short way) even if the type is int. As this
+      // tags length is serialized with 2 bytes only (short way) even if the
+      // type is int. As this
       // is non -ve numbers, we save the sign bit. See HBASE-11437
       out.write((byte) (0xff & (tlen >> 8)));
       out.write((byte) (0xff & tlen));
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/TableName.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/TableName.java
index 17fd3b7..c8e57ea 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/TableName.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/TableName.java
@@ -23,7 +23,6 @@ import java.util.Arrays;
 import java.util.Set;
 import java.util.concurrent.CopyOnWriteArraySet;

-import org.apache.hadoop.hbase.KeyValue.KVComparator;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -514,10 +513,10 @@ public final class TableName implements Comparable<TableName> {
    *
    * @return The comparator.
   */
-  public KVComparator getRowComparator() {
+  public CellComparator getRowComparator() {
     if(TableName.META_TABLE_NAME.equals(this)) {
-      return KeyValue.META_COMPARATOR;
+      return CellComparator.META_COMPARATOR;
     }
-    return KeyValue.COMPARATOR;
+    return CellComparator.COMPARATOR;
   }
 }
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/KeyValueCodec.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/KeyValueCodec.java
index f41d6b0..7610b30 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/KeyValueCodec.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/KeyValueCodec.java
@@ -23,7 +23,6 @@ import java.io.OutputStream;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
-import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValueUtil;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
@@ -65,7 +64,7 @@ public class KeyValueCodec implements Codec {
     }

     protected Cell parseCell() throws IOException {
-      return KeyValue.iscreate(in);
+      return KeyValueUtil.iscreate(in);
     }
   }
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/KeyValueCodecWithTags.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/KeyValueCodecWithTags.java
index 664fcac..63cea49 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/KeyValueCodecWithTags.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/KeyValueCodecWithTags.java
@@ -23,7 +23,6 @@ import java.io.OutputStream;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
-import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValueUtil;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
@@ -71,7 +70,7 @@ public class KeyValueCodecWithTags implements Codec {
     }

     protected Cell parseCell() throws IOException {
-      return KeyValue.iscreate(in);
+      return KeyValueUtil.iscreate(in);
     }
   }
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/BufferedDataBlockEncoder.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/BufferedDataBlockEncoder.java
index 5b049fd..f79ae36 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/BufferedDataBlockEncoder.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/BufferedDataBlockEncoder.java
@@ -26,8 +26,6 @@ import org.apache.hadoop.hbase.CellComparator;
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.KeyValue.KVComparator;
-import org.apache.hadoop.hbase.KeyValue.SamePrefixComparator;
 import org.apache.hadoop.hbase.KeyValue.Type;
 import org.apache.hadoop.hbase.KeyValueUtil;
 import org.apache.hadoop.hbase.SettableSequenceId;
@@ -520,17 +518,15 @@ abstract class BufferedDataBlockEncoder implements DataBlockEncoder {
       BufferedEncodedSeeker implements EncodedSeeker {
     protected HFileBlockDecodingContext decodingCtx;
-    protected final KVComparator comparator;
-    protected final SamePrefixComparator samePrefixComparator;
+    protected final CellComparator comparator;
     protected ByteBuffer currentBuffer;
     protected STATE current = createSeekerState(); // always valid
     protected STATE previous = createSeekerState(); // may not be valid
     protected TagCompressionContext tagCompressionContext = null;

-    public BufferedEncodedSeeker(KVComparator comparator,
+    public BufferedEncodedSeeker(CellComparator comparator,
         HFileBlockDecodingContext decodingCtx) {
       this.comparator = comparator;
-      this.samePrefixComparator = comparator;
       this.decodingCtx = decodingCtx;
       if (decodingCtx.getHFileContext().isCompressTags()) {
         try {
@@ -540,7 +536,7 @@ abstract class BufferedDataBlockEncoder implements DataBlockEncoder {
         }
       }
     }
-    
+
     protected boolean includesMvcc() {
       return this.decodingCtx.getHFileContext().isIncludesMvcc();
     }
@@ -550,13 +546,7 @@ abstract class BufferedDataBlockEncoder implements DataBlockEncoder {
     }

     @Override
-    public int compareKey(KVComparator comparator, byte[] key, int offset, int length) {
-      return comparator.compareFlatKey(key, offset, length,
-          current.keyBuffer, 0, current.keyLength);
-    }
-
-    @Override
-    public int compareKey(KVComparator comparator, Cell key) {
+    public int compareKey(CellComparator comparator, Cell key) {
       return comparator.compareOnlyKeyPortion(key,
           new KeyValue.KeyOnlyKeyValue(current.keyBuffer, 0, current.keyLength));
     }
@@ -685,7 +675,7 @@ abstract class BufferedDataBlockEncoder implements DataBlockEncoder {
       KeyValue.KeyOnlyKeyValue currentCell = new KeyValue.KeyOnlyKeyValue();
       do {
         int comp;
-        if (samePrefixComparator != null) {
+        if (comparator != null) {
           currentCell.setKey(current.keyBuffer, 0, current.keyLength);
           if (current.lastCommonPrefix != 0) {
             // The KV format has row key length also in the byte array. The
@@ -698,9 +688,10 @@ abstract class BufferedDataBlockEncoder implements DataBlockEncoder {
             if (current.lastCommonPrefix <= 2) {
               rowCommonPrefix = 0;
             }
-            rowCommonPrefix += CellComparator.findCommonPrefixInRowPart(seekCell, currentCell,
+            rowCommonPrefix += findCommonPrefixInRowPart(seekCell, currentCell,
+                rowCommonPrefix);
+            comp = CellComparator.compareCommonRowPrefix(seekCell, currentCell,
                 rowCommonPrefix);
-            comp = CellComparator.compareCommonRowPrefix(seekCell, currentCell, rowCommonPrefix);
             if (comp == 0) {
               comp = compareTypeBytes(seekCell, currentCell);
               if (comp == 0) {
@@ -709,7 +700,7 @@ abstract class BufferedDataBlockEncoder implements DataBlockEncoder {
                     0, Math.min(familyCommonPrefix,
                         current.lastCommonPrefix - (3 + currentCell.getRowLength())));
-                familyCommonPrefix += CellComparator.findCommonPrefixInFamilyPart(seekCell,
+                familyCommonPrefix += findCommonPrefixInFamilyPart(seekCell,
                     currentCell, familyCommonPrefix);
                 comp = CellComparator.compareCommonFamilyPrefix(seekCell, currentCell,
                     familyCommonPrefix);
@@ -722,7 +713,7 @@ abstract class BufferedDataBlockEncoder implements DataBlockEncoder {
                       qualCommonPrefix, current.lastCommonPrefix - (3 + currentCell.getRowLength()
                           + currentCell.getFamilyLength())));
-                  qualCommonPrefix += CellComparator.findCommonPrefixInQualifierPart(seekCell,
+                  qualCommonPrefix += findCommonPrefixInQualifierPart(seekCell,
                       currentCell, qualCommonPrefix);
                   comp = CellComparator.compareCommonQualifierPrefix(seekCell, currentCell,
                       qualCommonPrefix);
@@ -798,6 +789,27 @@ abstract class BufferedDataBlockEncoder implements DataBlockEncoder {
       return 0;
     }

+    private static int findCommonPrefixInRowPart(Cell left, Cell right, int rowCommonPrefix) {
+      return Bytes.findCommonPrefix(left.getRowArray(), right.getRowArray(), left.getRowLength()
+          - rowCommonPrefix, right.getRowLength() - rowCommonPrefix, left.getRowOffset()
+          + rowCommonPrefix, right.getRowOffset() + rowCommonPrefix);
+    }
+
+    private static int findCommonPrefixInFamilyPart(Cell left, Cell right, int familyCommonPrefix) {
+      return Bytes
+          .findCommonPrefix(left.getFamilyArray(), right.getFamilyArray(), left.getFamilyLength()
+              - familyCommonPrefix, right.getFamilyLength() - familyCommonPrefix,
+              left.getFamilyOffset() + familyCommonPrefix, right.getFamilyOffset()
+              + familyCommonPrefix);
+    }
+
+    private static int findCommonPrefixInQualifierPart(Cell left, Cell right,
+        int qualifierCommonPrefix) {
+      return Bytes.findCommonPrefix(left.getQualifierArray(), right.getQualifierArray(),
+          left.getQualifierLength() - qualifierCommonPrefix, right.getQualifierLength()
+          - qualifierCommonPrefix, left.getQualifierOffset() + qualifierCommonPrefix,
+          right.getQualifierOffset() + qualifierCommonPrefix);
+    }

     private void moveToPrevious() {
       if (!previous.isValid()) {
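Editor's note: the seek loop above only narrows how many bytes each comparison re-examines; the byte scan itself is the Bytes.findCommonPrefix utility this patch adds to hbase-common (see the Bytes.java hunk further down). A minimal, self-contained sketch of that scan — the class name and sample values are illustrative, not part of the patch:

```java
// Illustrative only: mirrors Bytes.findCommonPrefix(byte[], byte[], int, int, int, int).
public class CommonPrefixDemo {
  static int findCommonPrefix(byte[] left, byte[] right, int leftLength, int rightLength,
      int leftOffset, int rightOffset) {
    int length = Math.min(leftLength, rightLength);
    int result = 0;
    // Walk both buffers in lock step until the first differing byte.
    while (result < length && left[leftOffset + result] == right[rightOffset + result]) {
      result++;
    }
    return result;
  }

  public static void main(String[] args) {
    byte[] a = "row-100".getBytes();
    byte[] b = "row-101".getBytes();
    // Prints 6 ("row-10" is shared), so a seek may skip the first 6 bytes of the row compare.
    System.out.println(findCommonPrefix(a, b, a.length, b.length, 0, 0));
  }
}
```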
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/CopyKeyDataBlockEncoder.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/CopyKeyDataBlockEncoder.java
index 6b87c77..6fa01c0 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/CopyKeyDataBlockEncoder.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/CopyKeyDataBlockEncoder.java
@@ -22,9 +22,9 @@ import java.io.IOException;
 import java.nio.ByteBuffer;

 import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellComparator;
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.KeyValue.KVComparator;
 import org.apache.hadoop.hbase.KeyValueUtil;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.util.ByteBufferUtils;
@@ -81,7 +81,7 @@ public class CopyKeyDataBlockEncoder extends BufferedDataBlockEncoder {
   }

   @Override
-  public EncodedSeeker createSeeker(KVComparator comparator,
+  public EncodedSeeker createSeeker(CellComparator comparator,
       final HFileBlockDecodingContext decodingCtx) {
     return new BufferedEncodedSeeker(comparator, decodingCtx) {
       @Override
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DataBlockEncoder.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DataBlockEncoder.java
index 8073e54..f084689 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DataBlockEncoder.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DataBlockEncoder.java
@@ -22,7 +22,7 @@ import java.io.IOException;
 import java.nio.ByteBuffer;

 import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.KeyValue.KVComparator;
+import org.apache.hadoop.hbase.CellComparator;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.io.hfile.HFileContext;
@@ -98,7 +98,7 @@ public interface DataBlockEncoder {
    * @param decodingCtx
    * @return A newly created seeker.
    */
-  EncodedSeeker createSeeker(KVComparator comparator,
+  EncodedSeeker createSeeker(CellComparator comparator,
       HFileBlockDecodingContext decodingCtx);

   /**
@@ -193,12 +193,8 @@ public interface DataBlockEncoder {
      * Compare the given key against the current key
      * @param comparator
      * @param key
-     * @param offset
-     * @param length
      * @return -1 if the passed key is smaller than the current key, 0 if equal and 1 if greater
      */
-    public int compareKey(KVComparator comparator, byte[] key, int offset, int length);
-
-    public int compareKey(KVComparator comparator, Cell key);
+    public int compareKey(CellComparator comparator, Cell key);
   }
 }
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DiffKeyDeltaEncoder.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DiffKeyDeltaEncoder.java
index 4182dc4..c55400b 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DiffKeyDeltaEncoder.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DiffKeyDeltaEncoder.java
@@ -22,9 +22,9 @@ import java.io.IOException;
 import java.nio.ByteBuffer;

 import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellComparator;
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.KeyValue.KVComparator;
 import org.apache.hadoop.hbase.KeyValueUtil;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.util.ByteBufferUtils;
@@ -372,7 +372,7 @@ public class DiffKeyDeltaEncoder extends BufferedDataBlockEncoder {
   }

   @Override
-  public EncodedSeeker createSeeker(KVComparator comparator,
+  public EncodedSeeker createSeeker(CellComparator comparator,
       HFileBlockDecodingContext decodingCtx) {
     return new BufferedEncodedSeeker(comparator, decodingCtx) {
       private byte[] familyNameWithSize;
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/FastDiffDeltaEncoder.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/FastDiffDeltaEncoder.java
index a6f43d0..3000993 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/FastDiffDeltaEncoder.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/FastDiffDeltaEncoder.java
@@ -22,9 +22,9 @@ import java.io.IOException;
 import java.nio.ByteBuffer;

 import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellComparator;
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.KeyValue.KVComparator;
 import org.apache.hadoop.hbase.KeyValueUtil;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.util.ByteBufferUtils;
@@ -392,7 +392,7 @@ public class FastDiffDeltaEncoder extends BufferedDataBlockEncoder {
   }

   @Override
-  public EncodedSeeker createSeeker(KVComparator comparator,
+  public EncodedSeeker createSeeker(CellComparator comparator,
       final HFileBlockDecodingContext decodingCtx) {
     return new BufferedEncodedSeeker(comparator, decodingCtx) {
       private void decode(boolean isFirst) {
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/PrefixKeyDeltaEncoder.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/PrefixKeyDeltaEncoder.java
index 0286eca..1350366 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/PrefixKeyDeltaEncoder.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/PrefixKeyDeltaEncoder.java
@@ -22,9 +22,9 @@ import java.io.IOException;
 import java.nio.ByteBuffer;

 import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellComparator;
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.KeyValue.KVComparator;
 import org.apache.hadoop.hbase.KeyValueUtil;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.util.ByteBufferUtils;
@@ -196,7 +196,7 @@ public class PrefixKeyDeltaEncoder extends BufferedDataBlockEncoder {
   }

   @Override
-  public EncodedSeeker createSeeker(KVComparator comparator,
+  public EncodedSeeker createSeeker(CellComparator comparator,
       final HFileBlockDecodingContext decodingCtx) {
     return new BufferedEncodedSeeker(comparator, decodingCtx) {
       @Override
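Editor's note: all four encoders now build their seekers from a CellComparator, and the byte[] compareKey overload is gone. A hedged, caller-side sketch of the narrowed API — the encoder choice and the decodingCtx/keyBytes variables are stand-ins for this example, not code from the patch:

```java
// A raw key must now be wrapped as a key-only Cell before comparing.
DataBlockEncoder encoder = DataBlockEncoding.FAST_DIFF.getEncoder();
DataBlockEncoder.EncodedSeeker seeker =
    encoder.createSeeker(CellComparator.COMPARATOR, decodingCtx); // decodingCtx from the HFile block
seeker.setCurrentBuffer(encodedKeyValues); // ByteBuffer holding the encoded cells
Cell probe = new KeyValue.KeyOnlyKeyValue(keyBytes, 0, keyBytes.length);
int cmp = seeker.compareKey(CellComparator.COMPARATOR, probe);
```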
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java
index 8096178..e24e585 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java
@@ -46,6 +46,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellComparator;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.io.RawComparator;
 import org.apache.hadoop.io.WritableComparator;
@@ -1369,7 +1370,7 @@ public class Bytes implements Comparable<Bytes> {
    * @param offset Offset into array at which vint begins.
    * @throws java.io.IOException e
    * @return deserialized long from buffer.
-   * @deprecated Use {@link #readAsVLong()} instead.
+   * @deprecated Use {@link #readAsVLong(byte[], int)} instead.
    */
   @Deprecated
   public static long readVLong(final byte [] buffer, final int offset)
@@ -2044,6 +2045,41 @@ public class Bytes implements Comparable<Bytes> {
    * @param key the key you want to find
    * @param offset the offset in the key you want to find
    * @param length the length of the key
+   * @return zero-based index of the key, if the key is present in the array.
+   *         Otherwise, a value -(i + 1) such that the key is between arr[i -
+   *         1] and arr[i] non-inclusively, where i is in [0, N], if we define
+   *         arr[-1] = -Inf and arr[N] = Inf for an N-element array. The above
+   *         means that this function can return 2N + 1 different values
+   *         ranging from -(N + 1) to N - 1.
+   */
+  public static int binarySearch(byte[][] arr, byte[] key, int offset, int length) {
+    int low = 0;
+    int high = arr.length - 1;
+
+    while (low <= high) {
+      int mid = (low + high) >>> 1;
+      int cmp = Bytes.BYTES_RAWCOMPARATOR
+          .compare(key, offset, length, arr[mid], 0, arr[mid].length);
+      // key lives above the midpoint
+      if (cmp > 0)
+        low = mid + 1;
+      // key lives below the midpoint
+      else if (cmp < 0)
+        high = mid - 1;
+      // BAM. how often does this really happen?
+      else
+        return mid;
+    }
+    return -(low + 1);
+  }
+
+  /**
+   * Binary search for keys in indexes.
+   *
+   * @param arr array of byte arrays to search for
+   * @param key the key you want to find
+   * @param offset the offset in the key you want to find
+   * @param length the length of the key
    * @param comparator a comparator to compare.
    * @return zero-based index of the key, if the key is present in the array.
    *         Otherwise, a value -(i + 1) such that the key is between arr[i -
    *         1] and arr[i] non-inclusively, where i is in [0, N], if we define
    *         arr[-1] = -Inf and arr[N] = Inf for an N-element array. The above
    *         means that this function can return 2N + 1 different values
    *         ranging from -(N + 1) to N - 1.
@@ -2053,16 +2089,15 @@ public class Bytes implements Comparable<Bytes> {
    */
   public static int binarySearch(byte [][]arr, byte []key, int offset,
-      int length, RawComparator<?> comparator) {
+      int length, CellComparator comparator) {
     int low = 0;
     int high = arr.length - 1;
-
+    KeyValue.KeyOnlyKeyValue keyOnlyKv = new KeyValue.KeyOnlyKeyValue(key, offset, length);
     while (low <= high) {
       int mid = (low+high) >>> 1;
       // we have to compare in this order, because the comparator order
       // has special logic when the 'left side' is a special key.
-      int cmp = comparator.compare(key, offset, length,
-          arr[mid], 0, arr[mid].length);
+      int cmp = comparator.compare(keyOnlyKv, arr[mid], 0, arr[mid].length);
       // key lives above the midpoint
       if (cmp > 0)
         low = mid + 1;
@@ -2090,7 +2125,7 @@ public class Bytes implements Comparable<Bytes> {
    *         ranging from -(N + 1) to N - 1.
    * @return the index of the block
    */
-  public static int binarySearch(byte[][] arr, Cell key, RawComparator<Cell> comparator) {
+  public static int binarySearch(byte[][] arr, Cell key, Comparator<Cell> comparator) {
     int low = 0;
     int high = arr.length - 1;
     KeyValue.KeyOnlyKeyValue r = new KeyValue.KeyOnlyKeyValue();
@@ -2521,4 +2556,58 @@ public class Bytes implements Comparable<Bytes> {
     return b;
   }

+  /**
+   * @param b
+   * @param delimiter
+   * @return Index of delimiter having started from start of <code>b</code>
+   *         moving rightward.
+   */
+  public static int searchDelimiterIndex(final byte[] b, int offset, final int length,
+      final int delimiter) {
+    if (b == null) {
+      throw new IllegalArgumentException("Passed buffer is null");
+    }
+    int result = -1;
+    for (int i = offset; i < length + offset; i++) {
+      if (b[i] == delimiter) {
+        result = i;
+        break;
+      }
+    }
+    return result;
+  }
+
+  /**
+   * Find index of passed delimiter walking from end of buffer backwards.
+   *
+   * @param b
+   * @param delimiter
+   * @return Index of delimiter
+   */
+  public static int searchDelimiterIndexInReverse(final byte[] b, final int offset,
+      final int length, final int delimiter) {
+    if (b == null) {
+      throw new IllegalArgumentException("Passed buffer is null");
+    }
+    int result = -1;
+    for (int i = (offset + length) - 1; i >= offset; i--) {
+      if (b[i] == delimiter) {
+        result = i;
+        break;
+      }
+    }
+    return result;
+  }
+
+  public static int findCommonPrefix(byte[] left, byte[] right, int leftLength, int rightLength,
+      int leftOffset, int rightOffset) {
+    int length = Math.min(leftLength, rightLength);
+    int result = 0;
+
+    while (result < length && left[leftOffset + result] == right[rightOffset + result]) {
+      result++;
+    }
+    return result;
+  }
+
 }
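Editor's note: the return conventions of the helpers just added are easy to get wrong at call sites, so here is a quick worked example. The arrays and the key bytes are made up for illustration:

```java
// The -(i + 1) convention of the new comparator-less binarySearch overload.
byte[][] arr = { { 1 }, { 3 }, { 5 } };
int hit  = Bytes.binarySearch(arr, new byte[] { 3 }, 0, 1); // 1: exact match at index 1
int miss = Bytes.binarySearch(arr, new byte[] { 4 }, 0, 1); // -3, i.e. -(2 + 1): {4} sorts before arr[2]
int insertionPoint = -miss - 1;                             // 2: where {4} would be inserted

// The two delimiter scans differ only in direction.
byte[] b = Bytes.toBytes("hbase:meta,,1");
int first = Bytes.searchDelimiterIndex(b, 0, b.length, ',');          // 10: leftmost ','
int last  = Bytes.searchDelimiterIndexInReverse(b, 0, b.length, ','); // 11: rightmost ','
```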
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/test/RedundantKVGenerator.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/test/RedundantKVGenerator.java
index 52bc4e0..26b513c 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/test/RedundantKVGenerator.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/test/RedundantKVGenerator.java
@@ -24,6 +24,7 @@ import java.util.List;
 import java.util.Map;
 import java.util.Random;

+import org.apache.hadoop.hbase.CellComparator;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.Tag;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
@@ -284,7 +285,7 @@ public class RedundantKVGenerator {
       }
     }

-    Collections.sort(result, KeyValue.COMPARATOR);
+    Collections.sort(result, CellComparator.COMPARATOR);

     return result;
   }
diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestCellComparator.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestCellComparator.java
index 007f826..111b8c8 100644
--- a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestCellComparator.java
+++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestCellComparator.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.hbase;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;

-import org.apache.hadoop.hbase.KeyValue.KVComparator;
 import org.apache.hadoop.hbase.KeyValue.Type;
 import org.apache.hadoop.hbase.testclassification.MiscTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
@@ -30,6 +29,7 @@ import org.junit.experimental.categories.Category;

 @Category({MiscTests.class, SmallTests.class})
 public class TestCellComparator {
+  private CellComparator comparator = new CellComparator();

   byte[] row1 = Bytes.toBytes("row1");
   byte[] row2 = Bytes.toBytes("row2");
   byte[] row_1_0 = Bytes.toBytes("row10");
@@ -47,7 +47,7 @@ public class TestCellComparator {
   public void testCompareCells() {
     KeyValue kv1 = new KeyValue(row1, fam1, qual1, val);
     KeyValue kv2 = new KeyValue(row2, fam1, qual1, val);
-    assertTrue((CellComparator.compare(kv1, kv2, false)) < 0);
+    assertTrue((comparator.compare(kv1, kv2, false)) < 0);

     kv1 = new KeyValue(row1, fam2, qual1, val);
     kv2 = new KeyValue(row1, fam1, qual1, val);
@@ -55,11 +55,11 @@ public class TestCellComparator {

     kv1 = new KeyValue(row1, fam1, qual1, 1l, val);
     kv2 = new KeyValue(row1, fam1, qual1, 2l, val);
-    assertTrue((CellComparator.compare(kv1, kv2, false) > 0));
+    assertTrue((comparator.compare(kv1, kv2, false) > 0));

     kv1 = new KeyValue(row1, fam1, qual1, 1l, Type.Put);
     kv2 = new KeyValue(row1, fam1, qual1, 1l, Type.Maximum);
-    assertTrue((CellComparator.compare(kv1, kv2, false) > 0));
+    assertTrue((comparator.compare(kv1, kv2, false) > 0));

     kv1 = new KeyValue(row1, fam1, qual1, 1l, Type.Put);
     kv2 = new KeyValue(row1, fam_1_2, qual1, 1l, Type.Maximum);
@@ -75,70 +75,93 @@ public class TestCellComparator {
     kv1 = new KeyValue(row1, fam1, qual1, 1l, Type.Put);
     kv2 = new KeyValue(row1, fam1, qual1, 1l, Type.Put);
-    assertTrue((CellComparator.equals(kv1, kv2)));
+    assertTrue((CellUtil.equals(kv1, kv2)));
   }

   @Test
   public void testGetShortMidpoint() {
-    KeyValue.KVComparator comparator = new KeyValue.KVComparator();
+    CellComparator comparator = new CellComparator();

     Cell left = CellUtil.createCell(Bytes.toBytes("a"), Bytes.toBytes("a"), Bytes.toBytes("a"));
     Cell right = CellUtil.createCell(Bytes.toBytes("a"), Bytes.toBytes("a"), Bytes.toBytes("a"));
-    Cell mid = CellComparator.getMidpoint(comparator, left, right);
-    assertTrue(CellComparator.compare(left, mid, true) <= 0);
-    assertTrue(CellComparator.compare(mid, right, true) <= 0);
+    Cell mid = CellUtil.getMidpoint(comparator, left, right);
+    assertTrue(comparator.compare(left, mid, true) <= 0);
+    assertTrue(comparator.compare(mid, right, true) <= 0);

     left = CellUtil.createCell(Bytes.toBytes("a"), Bytes.toBytes("a"), Bytes.toBytes("a"));
     right = CellUtil.createCell(Bytes.toBytes("b"), Bytes.toBytes("a"), Bytes.toBytes("a"));
-    mid = CellComparator.getMidpoint(comparator, left, right);
-    assertTrue(CellComparator.compare(left, mid, true) < 0);
-    assertTrue(CellComparator.compare(mid, right, true) <= 0);
+    mid = CellUtil.getMidpoint(comparator, left, right);
+    assertTrue(comparator.compare(left, mid, true) < 0);
+    assertTrue(comparator.compare(mid, right, true) <= 0);

     left = CellUtil.createCell(Bytes.toBytes("g"), Bytes.toBytes("a"), Bytes.toBytes("a"));
     right = CellUtil.createCell(Bytes.toBytes("i"), Bytes.toBytes("a"), Bytes.toBytes("a"));
-    mid = CellComparator.getMidpoint(comparator, left, right);
-    assertTrue(CellComparator.compare(left, mid, true) < 0);
-    assertTrue(CellComparator.compare(mid, right, true) <= 0);
+    mid = CellUtil.getMidpoint(comparator, left, right);
+    assertTrue(comparator.compare(left, mid, true) < 0);
+    assertTrue(comparator.compare(mid, right, true) <= 0);

     left = CellUtil.createCell(Bytes.toBytes("a"), Bytes.toBytes("a"), Bytes.toBytes("a"));
     right = CellUtil.createCell(Bytes.toBytes("bbbbbbb"), Bytes.toBytes("a"), Bytes.toBytes("a"));
-    mid = CellComparator.getMidpoint(comparator, left, right);
-    assertTrue(CellComparator.compare(left, mid, true) < 0);
-    assertTrue(CellComparator.compare(mid, right, true) < 0);
+    mid = CellUtil.getMidpoint(comparator, left, right);
+    assertTrue(comparator.compare(left, mid, true) < 0);
+    assertTrue(comparator.compare(mid, right, true) < 0);
     assertEquals(1, (int)mid.getRowLength());

     left = CellUtil.createCell(Bytes.toBytes("a"), Bytes.toBytes("a"), Bytes.toBytes("a"));
     right = CellUtil.createCell(Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("a"));
-    mid = CellComparator.getMidpoint(comparator, left, right);
-    assertTrue(CellComparator.compare(left, mid, true) < 0);
-    assertTrue(CellComparator.compare(mid, right, true) <= 0);
+    mid = CellUtil.getMidpoint(comparator, left, right);
+    assertTrue(comparator.compare(left, mid, true) < 0);
+    assertTrue(comparator.compare(mid, right, true) <= 0);

     left = CellUtil.createCell(Bytes.toBytes("a"), Bytes.toBytes("a"), Bytes.toBytes("a"));
     right = CellUtil.createCell(Bytes.toBytes("a"), Bytes.toBytes("aaaaaaaa"), Bytes.toBytes("b"));
-    mid = CellComparator.getMidpoint(comparator, left, right);
-    assertTrue(CellComparator.compare(left, mid, true) < 0);
-    assertTrue(CellComparator.compare(mid, right, true) < 0);
+    mid = CellUtil.getMidpoint(comparator, left, right);
+    assertTrue(comparator.compare(left, mid, true) < 0);
+    assertTrue(comparator.compare(mid, right, true) < 0);
     assertEquals(2, (int)mid.getFamilyLength());

     left = CellUtil.createCell(Bytes.toBytes("a"), Bytes.toBytes("a"), Bytes.toBytes("a"));
     right = CellUtil.createCell(Bytes.toBytes("a"), Bytes.toBytes("a"), Bytes.toBytes("aaaaaaaaa"));
-    mid = CellComparator.getMidpoint(comparator, left, right);
-    assertTrue(CellComparator.compare(left, mid, true) < 0);
-    assertTrue(CellComparator.compare(mid, right, true) < 0);
+    mid = CellUtil.getMidpoint(comparator, left, right);
+    assertTrue(comparator.compare(left, mid, true) < 0);
+    assertTrue(comparator.compare(mid, right, true) < 0);
     assertEquals(2, (int)mid.getQualifierLength());

     left = CellUtil.createCell(Bytes.toBytes("a"), Bytes.toBytes("a"), Bytes.toBytes("a"));
     right = CellUtil.createCell(Bytes.toBytes("a"), Bytes.toBytes("a"), Bytes.toBytes("b"));
-    mid = CellComparator.getMidpoint(comparator, left, right);
-    assertTrue(CellComparator.compare(left, mid, true) < 0);
-    assertTrue(CellComparator.compare(mid, right, true) <= 0);
+    mid = CellUtil.getMidpoint(comparator, left, right);
+    assertTrue(comparator.compare(left, mid, true) < 0);
+    assertTrue(comparator.compare(mid, right, true) <= 0);
     assertEquals(1, (int)mid.getQualifierLength());

     // Assert that if meta comparator, it returns the right cell -- i.e. no optimization done.
     left = CellUtil.createCell(Bytes.toBytes("g"), Bytes.toBytes("a"), Bytes.toBytes("a"));
     right = CellUtil.createCell(Bytes.toBytes("i"), Bytes.toBytes("a"), Bytes.toBytes("a"));
-    mid = CellComparator.getMidpoint(new KeyValue.MetaComparator(), left, right);
-    assertTrue(CellComparator.compare(left, mid, true) < 0);
-    assertTrue(CellComparator.compare(mid, right, true) == 0);
+    mid = CellUtil.getMidpoint(CellComparator.META_COMPARATOR, left, right);
+    assertTrue(comparator.compare(left, mid, true) < 0);
+    assertTrue(comparator.compare(mid, right, true) == 0);
+  }
+
+  @Test
+  public void testCompareCellWithKey() throws Exception {
+    KeyValue kv1 = new KeyValue(row1, fam1, qual1, val);
+    KeyValue kv2 = new KeyValue(row2, fam1, qual1, val);
+    assertTrue((comparator.compare(kv1, kv2.getKey(), 0, kv2.getKey().length)) < 0);
+
+    kv1 = new KeyValue(row1, fam2, qual1, val);
+    kv2 = new KeyValue(row1, fam1, qual1, val);
+    assertTrue((comparator.compare(kv1, kv2.getKey(), 0, kv2.getKey().length)) > 0);
+
+    kv1 = new KeyValue(row1, fam1, qual1, 1l, val);
+    kv2 = new KeyValue(row1, fam1, qual1, 2l, val);
+    assertTrue((comparator.compare(kv1, kv2.getKey(), 0, kv2.getKey().length)) > 0);
+
+    kv1 = new KeyValue(row1, fam1, qual1, 1l, Type.Put);
+    kv2 = new KeyValue(row1, fam1, qual1, 1l, Type.Maximum);
+    assertTrue((comparator.compare(kv1, kv2.getKey(), 0, kv2.getKey().length)) > 0);
+
+    kv1 = new KeyValue(row1, fam1, qual1, 1l, Type.Put);
+    kv2 = new KeyValue(row1, fam1, qual1, 1l, Type.Put);
+    assertTrue((comparator.compare(kv1, kv2.getKey(), 0, kv2.getKey().length)) == 0);
   }
 }
\ No newline at end of file
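Editor's note: a compact recap of the ordering rules these tests exercise, using the now instance-based comparator. Rows, families and values here are arbitrary sample data:

```java
// Same row/family/qualifier: the higher timestamp (newer cell) sorts first.
CellComparator c = CellComparator.COMPARATOR;
KeyValue newer = new KeyValue(Bytes.toBytes("row1"), Bytes.toBytes("f"),
    Bytes.toBytes("q"), 2L, KeyValue.Type.Put, Bytes.toBytes("v"));
KeyValue older = new KeyValue(Bytes.toBytes("row1"), Bytes.toBytes("f"),
    Bytes.toBytes("q"), 1L, KeyValue.Type.Put, Bytes.toBytes("v"));
assert c.compare(newer, older) < 0;
assert c.compare(older, newer) > 0;
```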
diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestKeyValue.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestKeyValue.java
index e1de0c3..14ab69e 100644
--- a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestKeyValue.java
+++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestKeyValue.java
@@ -34,8 +34,6 @@ import junit.framework.TestCase;

 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.KeyValue.KVComparator;
-import org.apache.hadoop.hbase.KeyValue.MetaComparator;
 import org.apache.hadoop.hbase.KeyValue.Type;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -109,24 +107,24 @@ public class TestKeyValue extends TestCase {
     final byte [] qf = Bytes.toBytes("umn");
     KeyValue aaa = new KeyValue(a, fam, qf, a);
     KeyValue bbb = new KeyValue(b, fam, qf, b);
-    assertTrue(KeyValue.COMPARATOR.compare(aaa, bbb) < 0);
-    assertTrue(KeyValue.COMPARATOR.compare(bbb, aaa) > 0);
+    assertTrue(CellComparator.COMPARATOR.compare(aaa, bbb) < 0);
+    assertTrue(CellComparator.COMPARATOR.compare(bbb, aaa) > 0);
     // Compare breaks if passed same ByteBuffer as both left and right arguments.
-    assertTrue(KeyValue.COMPARATOR.compare(bbb, bbb) == 0);
-    assertTrue(KeyValue.COMPARATOR.compare(aaa, aaa) == 0);
+    assertTrue(CellComparator.COMPARATOR.compare(bbb, bbb) == 0);
+    assertTrue(CellComparator.COMPARATOR.compare(aaa, aaa) == 0);
     // Do compare with different timestamps.
     aaa = new KeyValue(a, fam, qf, 1, a);
     bbb = new KeyValue(a, fam, qf, 2, a);
-    assertTrue(KeyValue.COMPARATOR.compare(aaa, bbb) > 0);
-    assertTrue(KeyValue.COMPARATOR.compare(bbb, aaa) < 0);
-    assertTrue(KeyValue.COMPARATOR.compare(aaa, aaa) == 0);
+    assertTrue(CellComparator.COMPARATOR.compare(aaa, bbb) > 0);
+    assertTrue(CellComparator.COMPARATOR.compare(bbb, aaa) < 0);
+    assertTrue(CellComparator.COMPARATOR.compare(aaa, aaa) == 0);
     // Do compare with different types.  Higher numbered types -- Delete
     // should sort ahead of lower numbers; i.e. Put
     aaa = new KeyValue(a, fam, qf, 1, KeyValue.Type.Delete, a);
     bbb = new KeyValue(a, fam, qf, 1, a);
-    assertTrue(KeyValue.COMPARATOR.compare(aaa, bbb) < 0);
-    assertTrue(KeyValue.COMPARATOR.compare(bbb, aaa) > 0);
-    assertTrue(KeyValue.COMPARATOR.compare(aaa, aaa) == 0);
+    assertTrue(CellComparator.COMPARATOR.compare(aaa, bbb) < 0);
+    assertTrue(CellComparator.COMPARATOR.compare(bbb, aaa) > 0);
+    assertTrue(CellComparator.COMPARATOR.compare(aaa, aaa) == 0);
   }

   public void testMoreComparisons() throws Exception {
@@ -137,7 +135,7 @@ public class TestKeyValue extends TestCase {
       Bytes.toBytes("TestScanMultipleVersions,row_0500,1236020145502"), now);
     KeyValue bbb = new KeyValue(
       Bytes.toBytes("TestScanMultipleVersions,,99999999999999"), now);
-    KVComparator c = new KeyValue.MetaComparator();
+    CellComparator c = CellComparator.META_COMPARATOR;
     assertTrue(c.compare(bbb, aaa) < 0);

     KeyValue aaaa = new KeyValue(Bytes.toBytes("TestScanMultipleVersions,,1236023996656"),
@@ -152,13 +150,13 @@ public class TestKeyValue extends TestCase {
       Bytes.toBytes("info"), Bytes.toBytes("regioninfo"), 1236034574912L, (byte[])null);
     assertTrue(c.compare(x, y) < 0);

-    comparisons(new KeyValue.MetaComparator());
-    comparisons(new KeyValue.KVComparator());
-    metacomparisons(new KeyValue.MetaComparator());
+    comparisons(CellComparator.META_COMPARATOR);
+    comparisons(CellComparator.COMPARATOR);
+    metacomparisons(CellComparator.META_COMPARATOR);
   }

   public void testMetaComparatorTableKeysWithCommaOk() {
-    MetaComparator c = new KeyValue.MetaComparator();
+    CellComparator c = CellComparator.META_COMPARATOR;
     long now = System.currentTimeMillis();
     // meta key values are not quite right.  A user can enter illegal values
     // from shell when scanning meta.
@@ -179,17 +177,17 @@ public class TestKeyValue extends TestCase {
       Bytes.toBytes("fam"), Bytes.toBytes(""), Long.MAX_VALUE, (byte[])null);
     KeyValue rowB = new KeyValue(Bytes.toBytes("testtable,www.hbase.org/%20,99999"),
       Bytes.toBytes("fam"), Bytes.toBytes(""), Long.MAX_VALUE, (byte[])null);
-    assertTrue(KeyValue.META_COMPARATOR.compare(rowA, rowB) < 0);
+    assertTrue(CellComparator.META_COMPARATOR.compare(rowA, rowB) < 0);

     rowA = new KeyValue(Bytes.toBytes("testtable,,1234"), Bytes.toBytes("fam"),
       Bytes.toBytes(""), Long.MAX_VALUE, (byte[])null);
     rowB = new KeyValue(Bytes.toBytes("testtable,$www.hbase.org/,99999"), Bytes.toBytes("fam"),
       Bytes.toBytes(""), Long.MAX_VALUE, (byte[])null);
-    assertTrue(KeyValue.META_COMPARATOR.compare(rowA, rowB) < 0);
+    assertTrue(CellComparator.META_COMPARATOR.compare(rowA, rowB) < 0);
   }

-  private void metacomparisons(final KeyValue.MetaComparator c) {
+  private void metacomparisons(final CellComparator c) {
     long now = System.currentTimeMillis();
     assertTrue(c.compare(new KeyValue(
         Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString()+",a,,0,1"), now),
@@ -206,7 +204,7 @@ public class TestKeyValue extends TestCase {
         Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString()+",a,,0,1"), now)) > 0);
   }

-  private void comparisons(final KeyValue.KVComparator c) {
+  private void comparisons(final CellComparator c) {
     long now = System.currentTimeMillis();
     assertTrue(c.compare(new KeyValue(
         Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString()+",,1"), now),
@@ -223,7 +221,7 @@ public class TestKeyValue extends TestCase {
   }

   public void testBinaryKeys() throws Exception {
-    Set<KeyValue> set = new TreeSet<KeyValue>(KeyValue.COMPARATOR);
+    Set<KeyValue> set = new TreeSet<KeyValue>(CellComparator.COMPARATOR);
     final byte [] fam = Bytes.toBytes("col");
     final byte [] qf = Bytes.toBytes("umn");
     final byte [] nb = new byte[0];
@@ -249,7 +247,7 @@ public class TestKeyValue extends TestCase {
     }
     assertTrue(assertion);
     // Make set with good comparator
-    set = new TreeSet<KeyValue>(new KeyValue.MetaComparator());
+    set = new TreeSet<KeyValue>(CellComparator.META_COMPARATOR);
     Collections.addAll(set, keys);
     count = 0;
     for (KeyValue k: set) {
@@ -271,7 +269,7 @@ public class TestKeyValue extends TestCase {
   private final byte[] qualA = Bytes.toBytes("qfA");
   private final byte[] qualB = Bytes.toBytes("qfB");

-  private void assertKVLess(KeyValue.KVComparator c,
+  private void assertKVLess(CellComparator c,
       KeyValue less, KeyValue greater) {
     int cmp = c.compare(less,greater);
@@ -280,20 +278,16 @@ public class TestKeyValue extends TestCase {
     assertTrue(cmp > 0);
   }

-  private void assertKVLessWithoutRow(KeyValue.KVComparator c, int common, KeyValue less,
+  private void assertKVLessWithoutRow(CellComparator c, int common, KeyValue less,
       KeyValue greater) {
-    int cmp = c.compareIgnoringPrefix(common, less.getBuffer(), less.getOffset()
-        + KeyValue.ROW_OFFSET, less.getKeyLength(), greater.getBuffer(),
-        greater.getOffset() + KeyValue.ROW_OFFSET, greater.getKeyLength());
+    int cmp = c.compare(less, greater);
     assertTrue(cmp < 0);
-    cmp = c.compareIgnoringPrefix(common, greater.getBuffer(), greater.getOffset()
-        + KeyValue.ROW_OFFSET, greater.getKeyLength(), less.getBuffer(),
-        less.getOffset() + KeyValue.ROW_OFFSET, less.getKeyLength());
+    cmp = c.compare(greater, less);
     assertTrue(cmp > 0);
   }

   public void testCompareWithoutRow() {
-    final KeyValue.KVComparator c = KeyValue.COMPARATOR;
+    final CellComparator c = CellComparator.COMPARATOR;
     byte[] row = Bytes.toBytes("row");

     byte[] fa = Bytes.toBytes("fa");
@@ -340,7 +334,7 @@ public class TestKeyValue extends TestCase {
   }

   public void testFirstLastOnRow() {
-    final KVComparator c = KeyValue.COMPARATOR;
+    final CellComparator c = CellComparator.COMPARATOR;
     long ts = 1;
     byte[] bufferA = new byte[128];
     int offsetA = 0;
@@ -414,7 +408,7 @@ public class TestKeyValue extends TestCase {
     byte[] tmpArr = new byte[initialPadding + endingPadding + keyLen];
     System.arraycopy(kv.getBuffer(), kv.getKeyOffset(), tmpArr, initialPadding, keyLen);

-    KeyValue kvFromKey = KeyValue.createKeyValueFromKey(tmpArr, initialPadding,
+    KeyValue kvFromKey = KeyValueUtil.createKeyValueFromKey(tmpArr, initialPadding,
       keyLen);
     assertEquals(keyLen, kvFromKey.getKeyLength());
     assertEquals(KeyValue.ROW_OFFSET + keyLen, kvFromKey.getBuffer().length);
@@ -443,76 +437,73 @@ public class TestKeyValue extends TestCase {
    * See HBASE-7845
    */
   public void testGetShortMidpointKey() {
-    final KVComparator keyComparator = KeyValue.COMPARATOR;
+    final CellComparator keyComparator = CellComparator.COMPARATOR;
     //verify that faked shorter rowkey could be generated
     long ts = 5;
     KeyValue kv1 = new KeyValue(Bytes.toBytes("the quick brown fox"), family, qualA, ts, Type.Put);
     KeyValue kv2 = new KeyValue(Bytes.toBytes("the who test text"), family, qualA, ts, Type.Put);
-    byte[] newKey = keyComparator.getShortMidpointKey(kv1.getKey(), kv2.getKey());
-    assertTrue(keyComparator.compareFlatKey(kv1.getKey(), newKey) < 0);
-    assertTrue(keyComparator.compareFlatKey(newKey, kv2.getKey()) < 0);
-    short newRowLength = Bytes.toShort(newKey, 0);
+    Cell newKey = CellUtil.getMidpoint(keyComparator, kv1, kv2);
+    assertTrue(keyComparator.compare(kv1, newKey) < 0);
+    assertTrue(-(keyComparator.compare(kv2, newKey)) < 0);
     byte[] expectedArray = Bytes.toBytes("the r");
-    Bytes.equals(newKey, KeyValue.ROW_LENGTH_SIZE, newRowLength, expectedArray, 0,
-        expectedArray.length);
+    Bytes.equals(newKey.getRowArray(), newKey.getRowOffset(), newKey.getRowLength(),
+        expectedArray,
+        0, expectedArray.length);

     //verify: same with "row + family + qualifier", return rightKey directly
     kv1 = new KeyValue(Bytes.toBytes("ilovehbase"), family, qualA, 5, Type.Put);
     kv2 = new KeyValue(Bytes.toBytes("ilovehbase"), family, qualA, 0, Type.Put);
-    assertTrue(keyComparator.compareFlatKey(kv1.getKey(), kv2.getKey()) < 0);
-    newKey = keyComparator.getShortMidpointKey(kv1.getKey(), kv2.getKey());
-    assertTrue(keyComparator.compareFlatKey(kv1.getKey(), newKey) < 0);
-    assertTrue(keyComparator.compareFlatKey(newKey, kv2.getKey()) == 0);
+    assertTrue(keyComparator.compare(kv1, kv2) < 0);
+    newKey = CellUtil.getMidpoint(keyComparator, kv1, kv2);
+    assertTrue(keyComparator.compare(kv1, newKey) < 0);
+    assertTrue((-keyComparator.compare(kv2, newKey)) == 0);
     kv1 = new KeyValue(Bytes.toBytes("ilovehbase"), family, qualA, -5, Type.Put);
     kv2 = new KeyValue(Bytes.toBytes("ilovehbase"), family, qualA, -10, Type.Put);
-    assertTrue(keyComparator.compareFlatKey(kv1.getKey(), kv2.getKey()) < 0);
-    newKey = keyComparator.getShortMidpointKey(kv1.getKey(), kv2.getKey());
-    assertTrue(keyComparator.compareFlatKey(kv1.getKey(), newKey) < 0);
-    assertTrue(keyComparator.compareFlatKey(newKey, kv2.getKey()) == 0);
+    assertTrue(keyComparator.compare(kv1, kv2) < 0);
+    newKey = CellUtil.getMidpoint(keyComparator, kv1, kv2);
+    assertTrue(keyComparator.compare(kv1, newKey) < 0);
+    assertTrue((-keyComparator.compare(kv2, newKey)) == 0);

     // verify: same with row, different with qualifier
     kv1 = new KeyValue(Bytes.toBytes("ilovehbase"), family, qualA, 5, Type.Put);
     kv2 = new KeyValue(Bytes.toBytes("ilovehbase"), family, qualB, 5, Type.Put);
-    assertTrue(keyComparator.compareFlatKey(kv1.getKey(), kv2.getKey()) < 0);
-    newKey = keyComparator.getShortMidpointKey(kv1.getKey(), kv2.getKey());
-    assertTrue(keyComparator.compareFlatKey(kv1.getKey(), newKey) < 0);
-    assertTrue(keyComparator.compareFlatKey(newKey, kv2.getKey()) < 0);
-    KeyValue newKeyValue = KeyValue.createKeyValueFromKey(newKey);
-    assertTrue(Arrays.equals(newKeyValue.getFamily(),family));
-    assertTrue(Arrays.equals(newKeyValue.getQualifier(),qualB));
-    assertTrue(newKeyValue.getTimestamp() == HConstants.LATEST_TIMESTAMP);
-    assertTrue(newKeyValue.getTypeByte() == Type.Maximum.getCode());
+    assertTrue(keyComparator.compare(kv1, kv2) < 0);
+    newKey = CellUtil.getMidpoint(keyComparator, kv1, kv2);
+    assertTrue(keyComparator.compare(kv1, newKey) < 0);
+    assertTrue(-(keyComparator.compare(kv2, newKey)) < 0);
+    assertTrue(Arrays.equals(newKey.getFamily(),family));
+    assertTrue(Arrays.equals(newKey.getQualifier(),qualB));
+    assertTrue(newKey.getTimestamp() == HConstants.LATEST_TIMESTAMP);
+    assertTrue(newKey.getTypeByte() == Type.Maximum.getCode());

     //verify metaKeyComparator's getShortMidpointKey output
-    final KVComparator metaKeyComparator = KeyValue.META_COMPARATOR;
+    final CellComparator metaKeyComparator = CellComparator.META_COMPARATOR;
     kv1 = new KeyValue(Bytes.toBytes("ilovehbase123"), family, qualA, 5, Type.Put);
     kv2 = new KeyValue(Bytes.toBytes("ilovehbase234"), family, qualA, 0, Type.Put);
-    newKey = metaKeyComparator.getShortMidpointKey(kv1.getKey(), kv2.getKey());
-    assertTrue(metaKeyComparator.compareFlatKey(kv1.getKey(), newKey) < 0);
-    assertTrue(metaKeyComparator.compareFlatKey(newKey, kv2.getKey()) == 0);
+    newKey = CellUtil.getMidpoint(metaKeyComparator, kv1, kv2);
+    assertTrue(metaKeyComparator.compare(kv1, newKey) < 0);
+    assertTrue((-metaKeyComparator.compare(kv2, newKey) == 0));

     //verify common fix scenario
     kv1 = new KeyValue(Bytes.toBytes("ilovehbase"), family, qualA, ts, Type.Put);
     kv2 = new KeyValue(Bytes.toBytes("ilovehbaseandhdfs"), family, qualA, ts, Type.Put);
-    assertTrue(keyComparator.compareFlatKey(kv1.getKey(), kv2.getKey()) < 0);
-    newKey = keyComparator.getShortMidpointKey(kv1.getKey(), kv2.getKey());
-    assertTrue(keyComparator.compareFlatKey(kv1.getKey(), newKey) < 0);
-    assertTrue(keyComparator.compareFlatKey(newKey, kv2.getKey()) < 0);
-    newRowLength = Bytes.toShort(newKey, 0);
+    assertTrue(keyComparator.compare(kv1, kv2) < 0);
+    newKey = CellUtil.getMidpoint(keyComparator, kv1, kv2);
+    assertTrue(keyComparator.compare(kv1, newKey) < 0);
+    assertTrue(-(keyComparator.compare(kv2, newKey)) < 0);
     expectedArray = Bytes.toBytes("ilovehbasea");
-    Bytes.equals(newKey, KeyValue.ROW_LENGTH_SIZE, newRowLength, expectedArray, 0,
-        expectedArray.length);
+    Bytes.equals(newKey.getRowArray(), newKey.getRowOffset(), newKey.getRowLength(), expectedArray,
+        0, expectedArray.length);

     //verify only 1 offset scenario
     kv1 = new KeyValue(Bytes.toBytes("100abcdefg"), family, qualA, ts, Type.Put);
     kv2 = new KeyValue(Bytes.toBytes("101abcdefg"), family, qualA, ts, Type.Put);
-    assertTrue(keyComparator.compareFlatKey(kv1.getKey(), kv2.getKey()) < 0);
-    newKey = keyComparator.getShortMidpointKey(kv1.getKey(), kv2.getKey());
-    assertTrue(keyComparator.compareFlatKey(kv1.getKey(), newKey) < 0);
-    assertTrue(keyComparator.compareFlatKey(newKey, kv2.getKey()) < 0);
-    newRowLength = Bytes.toShort(newKey, 0);
+    assertTrue(keyComparator.compare(kv1, kv2) < 0);
+    newKey = CellUtil.getMidpoint(keyComparator, kv1, kv2);
+    assertTrue(keyComparator.compare(kv1, newKey) < 0);
+    assertTrue(-(keyComparator.compare(kv2, newKey)) < 0);
     expectedArray = Bytes.toBytes("101");
-    Bytes.equals(newKey, KeyValue.ROW_LENGTH_SIZE, newRowLength, expectedArray, 0,
-        expectedArray.length);
+    Bytes.equals(newKey.getRowArray(), newKey.getRowOffset(), newKey.getRowLength(), expectedArray,
+        0, expectedArray.length);
   }

   public void testKVsWithTags() {
@@ -577,7 +568,7 @@ public class TestKeyValue extends TestCase {
   }

   public void testMetaKeyComparator() {
-    MetaComparator c = new KeyValue.MetaComparator();
+    CellComparator c = CellComparator.META_COMPARATOR;
     long now = System.currentTimeMillis();

     KeyValue a = new KeyValue(Bytes.toBytes("table1"), now);
@@ -652,9 +643,9 @@ public class TestKeyValue extends TestCase {
     KeyValueUtil.oswrite(mkvA2, os, true);
     DataInputStream is = new DataInputStream(new ByteArrayInputStream(
         byteArrayOutputStream.toByteArray()));
-    KeyValue deSerKV1 = KeyValue.iscreate(is);
+    KeyValue deSerKV1 = KeyValueUtil.iscreate(is);
     assertTrue(kvA1.equals(deSerKV1));
-    KeyValue deSerKV2 = KeyValue.iscreate(is);
+    KeyValue deSerKV2 = KeyValueUtil.iscreate(is);
     assertTrue(kvA2.equals(deSerKV2));
   }
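Editor's note: a hedged sketch of the serialize/deserialize round trip the last test exercises, now that the factory lives in KeyValueUtil (formerly KeyValue.iscreate). The stream variables and sample cell are local to this example:

```java
// Write one length-prefixed KeyValue (with tags) and read it back.
ByteArrayOutputStream bos = new ByteArrayOutputStream();
KeyValue kv = new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"),
    Bytes.toBytes("q"), Bytes.toBytes("v"));
KeyValueUtil.oswrite(kv, bos, true); // true: include tags in the serialized form
DataInputStream in = new DataInputStream(new ByteArrayInputStream(bos.toByteArray()));
KeyValue copy = KeyValueUtil.iscreate(in); // reads one length-prefixed KeyValue back
assert kv.equals(copy);
```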
diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/codec/TestCellCodec.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/codec/TestCellCodec.java
index 922de6f..1ce4bf6 100644
--- a/hbase-common/src/test/java/org/apache/hadoop/hbase/codec/TestCellCodec.java
+++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/codec/TestCellCodec.java
@@ -29,6 +29,7 @@ import java.io.IOException;

 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellComparator;
+import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.testclassification.MiscTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
@@ -112,13 +113,13 @@ public class TestCellCodec {
     Codec.Decoder decoder = codec.getDecoder(dis);
     assertTrue(decoder.advance());
     Cell c = decoder.current();
-    assertTrue(CellComparator.equals(c, kv1));
+    assertTrue(CellUtil.equals(c, kv1));
     assertTrue(decoder.advance());
     c = decoder.current();
-    assertTrue(CellComparator.equals(c, kv2));
+    assertTrue(CellUtil.equals(c, kv2));
     assertTrue(decoder.advance());
     c = decoder.current();
-    assertTrue(CellComparator.equals(c, kv3));
+    assertTrue(CellUtil.equals(c, kv3));
     assertFalse(decoder.advance());
     dis.close();
     assertEquals(offset, cis.getCount());
diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/codec/TestCellCodecWithTags.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/codec/TestCellCodecWithTags.java
index 30f2f00..beff87a 100644
--- a/hbase-common/src/test/java/org/apache/hadoop/hbase/codec/TestCellCodecWithTags.java
+++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/codec/TestCellCodecWithTags.java
@@ -29,7 +29,7 @@ import java.io.IOException;
 import java.util.List;

 import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellComparator;
+import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.Tag;
@@ -76,7 +76,7 @@ public class TestCellCodecWithTags {
     Codec.Decoder decoder = codec.getDecoder(dis);
     assertTrue(decoder.advance());
     Cell c = decoder.current();
-    assertTrue(CellComparator.equals(c, cell1));
+    assertTrue(CellUtil.equals(c, cell1));
     List<Tag> tags = Tag.asList(c.getTagsArray(), c.getTagsOffset(), c.getTagsLength());
     assertEquals(2, tags.size());
     Tag tag = tags.get(0);
@@ -87,7 +87,7 @@ public class TestCellCodecWithTags {
     assertTrue(Bytes.equals(Bytes.toBytes("teststring2"), tag.getValue()));
     assertTrue(decoder.advance());
     c = decoder.current();
-    assertTrue(CellComparator.equals(c, cell2));
+    assertTrue(CellUtil.equals(c, cell2));
     tags = Tag.asList(c.getTagsArray(), c.getTagsOffset(), c.getTagsLength());
     assertEquals(1, tags.size());
     tag = tags.get(0);
@@ -95,7 +95,7 @@ public class TestCellCodecWithTags {
     assertTrue(Bytes.equals(Bytes.toBytes("teststring3"), tag.getValue()));
     assertTrue(decoder.advance());
     c = decoder.current();
-    assertTrue(CellComparator.equals(c, cell3));
+    assertTrue(CellUtil.equals(c, cell3));
     tags = Tag.asList(c.getTagsArray(), c.getTagsOffset(), c.getTagsLength());
     assertEquals(3, tags.size());
     tag = tags.get(0);
diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/codec/TestKeyValueCodecWithTags.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/codec/TestKeyValueCodecWithTags.java
index 007647a..04fb9a9 100644
--- a/hbase-common/src/test/java/org/apache/hadoop/hbase/codec/TestKeyValueCodecWithTags.java
+++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/codec/TestKeyValueCodecWithTags.java
@@ -29,7 +29,7 @@ import java.io.IOException;
 import java.util.List;

 import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellComparator;
+import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.Tag;
@@ -76,7 +76,7 @@ public class TestKeyValueCodecWithTags {
     Codec.Decoder decoder = codec.getDecoder(dis);
     assertTrue(decoder.advance());
     Cell c = decoder.current();
-    assertTrue(CellComparator.equals(c, kv1));
+    assertTrue(CellUtil.equals(c, kv1));
     List<Tag> tags = Tag.asList(c.getTagsArray(), c.getTagsOffset(), c.getTagsLength());
     assertEquals(2, tags.size());
     Tag tag = tags.get(0);
@@ -87,7 +87,7 @@ public class TestKeyValueCodecWithTags {
     assertTrue(Bytes.equals(Bytes.toBytes("teststring2"), tag.getValue()));
     assertTrue(decoder.advance());
     c = decoder.current();
-    assertTrue(CellComparator.equals(c, kv2));
+    assertTrue(CellUtil.equals(c, kv2));
     tags = Tag.asList(c.getTagsArray(), c.getTagsOffset(), c.getTagsLength());
     assertEquals(1, tags.size());
     tag = tags.get(0);
@@ -95,7 +95,7 @@ public class TestKeyValueCodecWithTags {
     assertTrue(Bytes.equals(Bytes.toBytes("teststring3"), tag.getValue()));
     assertTrue(decoder.advance());
     c = decoder.current();
-    assertTrue(CellComparator.equals(c, kv3));
+    assertTrue(CellUtil.equals(c, kv3));
     tags = Tag.asList(c.getTagsArray(), c.getTagsOffset(), c.getTagsLength());
     assertEquals(3, tags.size());
     tag = tags.get(0);
diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestBytes.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestBytes.java
index eb5e453..0e0cec7 100644
--- a/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestBytes.java
+++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestBytes.java
@@ -272,33 +272,25 @@ public class TestBytes extends TestCase {
     byte [] key4 = {0};
     byte [] key5 = {2};

-    assertEquals(1, Bytes.binarySearch(arr, key1, 0, 1,
-      Bytes.BYTES_RAWCOMPARATOR));
-    assertEquals(0, Bytes.binarySearch(arr, key1, 1, 1,
-      Bytes.BYTES_RAWCOMPARATOR));
+    assertEquals(1, Bytes.binarySearch(arr, key1, 0, 1));
+    assertEquals(0, Bytes.binarySearch(arr, key1, 1, 1));
     assertEquals(-(2+1), Arrays.binarySearch(arr, key2_2,
       Bytes.BYTES_COMPARATOR));
-    assertEquals(-(2+1), Bytes.binarySearch(arr, key2, 0, 1,
-      Bytes.BYTES_RAWCOMPARATOR));
-    assertEquals(4, Bytes.binarySearch(arr, key2, 1, 1,
-      Bytes.BYTES_RAWCOMPARATOR));
-    assertEquals(2, Bytes.binarySearch(arr, key3, 0, 1,
-      Bytes.BYTES_RAWCOMPARATOR));
-    assertEquals(5, Bytes.binarySearch(arr, key3, 1, 1,
-      Bytes.BYTES_RAWCOMPARATOR));
+    assertEquals(-(2+1), Bytes.binarySearch(arr, key2, 0, 1));
+    assertEquals(4, Bytes.binarySearch(arr, key2, 1, 1));
+    assertEquals(2, Bytes.binarySearch(arr, key3, 0, 1));
+    assertEquals(5, Bytes.binarySearch(arr, key3, 1, 1));
     assertEquals(-1,
-      Bytes.binarySearch(arr, key4, 0, 1, Bytes.BYTES_RAWCOMPARATOR));
+      Bytes.binarySearch(arr, key4, 0, 1));
     assertEquals(-2,
-      Bytes.binarySearch(arr, key5, 0, 1, Bytes.BYTES_RAWCOMPARATOR));
+      Bytes.binarySearch(arr, key5, 0, 1));

     // Search for values to the left and to the right of each item in the array.
     for (int i = 0; i < arr.length; ++i) {
       assertEquals(-(i + 1), Bytes.binarySearch(arr,
-        new byte[] { (byte) (arr[i][0] - 1) }, 0, 1,
-        Bytes.BYTES_RAWCOMPARATOR));
+        new byte[] { (byte) (arr[i][0] - 1) }, 0, 1));
       assertEquals(-(i + 2), Bytes.binarySearch(arr,
-        new byte[] { (byte) (arr[i][0] + 1) }, 0, 1,
-        Bytes.BYTES_RAWCOMPARATOR));
+        new byte[] { (byte) (arr[i][0] + 1) }, 0, 1));
     }
   }
diff --git a/hbase-examples/src/test/java/org/apache/hadoop/hbase/types/TestPBCell.java b/hbase-examples/src/test/java/org/apache/hadoop/hbase/types/TestPBCell.java
index a548b8a..e6f1b3e 100644
--- a/hbase-examples/src/test/java/org/apache/hadoop/hbase/types/TestPBCell.java
+++ b/hbase-examples/src/test/java/org/apache/hadoop/hbase/types/TestPBCell.java
@@ -22,6 +22,7 @@ import static org.junit.Assert.assertTrue;

 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellComparator;
+import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.testclassification.MiscTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
@@ -52,6 +53,6 @@ public class TestPBCell {
     pbr.setPosition(0);
     decoded = CODEC.decode(pbr);
     assertEquals(encodedLength, pbr.getPosition());
-    assertTrue(CellComparator.equals(cell, ProtobufUtil.toCell(decoded)));
+    assertTrue(CellUtil.equals(cell, ProtobufUtil.toCell(decoded)));
   }
 }
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestImportTsv.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestImportTsv.java
index 3d72447..b053e3d 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestImportTsv.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestImportTsv.java
@@ -36,6 +36,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellComparator;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.IntegrationTestingUtility;
 import org.apache.hadoop.hbase.testclassification.IntegrationTests;
@@ -78,7 +79,7 @@ public class IntegrationTestImportTsv implements Configurable, Tool {
       "row10\t1\tc1\tc2\n";

   protected static final Set<KeyValue> simple_expected =
-      new TreeSet<KeyValue>(KeyValue.COMPARATOR) {
+      new TreeSet<KeyValue>(CellComparator.COMPARATOR) {
     private static final long serialVersionUID = 1L;
     {
       byte[] family = Bytes.toBytes("d");
@@ -157,7 +158,7 @@ public class IntegrationTestImportTsv implements Configurable, Tool {
         assertTrue(
           format("Scan produced surprising result. expected: <%s>, actual: %s",
             expected, actual),
-          KeyValue.COMPARATOR.compare(expected, actual) == 0);
+          CellComparator.COMPARATOR.compare(expected, actual) == 0);
       }
     }
     assertFalse("Did not consume all expected values.", expectedIt.hasNext());
diff --git a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/PrefixTreeCodec.java b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/PrefixTreeCodec.java
index afcb526..71f053d 100644
--- a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/PrefixTreeCodec.java
+++ b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/PrefixTreeCodec.java
@@ -25,9 +25,9 @@ import java.nio.ByteBuffer;

 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellComparator;
 import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.KeyValue.KVComparator;
-import org.apache.hadoop.hbase.KeyValue.MetaComparator;
+import org.apache.hadoop.hbase.CellComparator.MetaCellComparator;
 import org.apache.hadoop.hbase.KeyValue.RawBytesComparator;
 import org.apache.hadoop.hbase.KeyValueUtil;
 import org.apache.hadoop.hbase.codec.prefixtree.decode.DecoderFactory;
@@ -58,7 +58,7 @@ import org.apache.hadoop.io.WritableUtils;
 * created and recycled by static PtEncoderFactory and PtDecoderFactory.
 */
@InterfaceAudience.Private
-public class PrefixTreeCodec implements DataBlockEncoder{
+public class PrefixTreeCodec implements DataBlockEncoder {

  /**
   * no-arg constructor for reflection
@@ -150,10 +150,11 @@ public class PrefixTreeCodec implements DataBlockEncoder {
   * the way to this point.
   */
  @Override
-  public EncodedSeeker createSeeker(KVComparator comparator, HFileBlockDecodingContext decodingCtx) {
+  public EncodedSeeker createSeeker(CellComparator comparator,
+      HFileBlockDecodingContext decodingCtx) {
    if (comparator instanceof RawBytesComparator){
-      throw new IllegalArgumentException("comparator must be KeyValue.KeyComparator");
-    } else if (comparator instanceof MetaComparator){
+      throw new IllegalArgumentException("comparator must be CellComparator");
+    } else if (comparator instanceof MetaCellComparator){
      throw new IllegalArgumentException("DataBlockEncoding.PREFIX_TREE not compatible with hbase:meta "
          +"table");
    }
diff --git a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/PrefixTreeSeeker.java b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/PrefixTreeSeeker.java
index d9852be..73e8ab4 100644
--- a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/PrefixTreeSeeker.java
+++ b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/PrefixTreeSeeker.java
@@ -21,9 +21,9 @@ package org.apache.hadoop.hbase.codec.prefixtree;
 import java.nio.ByteBuffer;

 import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellComparator;
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.KeyValue.KVComparator;
 import org.apache.hadoop.hbase.KeyValue.Type;
 import org.apache.hadoop.hbase.KeyValueUtil;
 import org.apache.hadoop.hbase.SettableSequenceId;
@@ -213,13 +213,6 @@ public class PrefixTreeSeeker implements EncodedSeeker {
   }

   @Override
-  public int compareKey(KVComparator comparator, byte[] key, int offset, int length) {
-    // can't optimize this, make a copy of the key
-    ByteBuffer bb = getKeyDeepCopy();
-    return comparator.compareFlatKey(key, offset, length, bb.array(), bb.arrayOffset(), bb.limit());
-  }
-
-  @Override
   public int seekToKeyInBlock(Cell key, boolean forceBeforeOnExactMatch) {
     if (USE_POSITION_BEFORE) {
       return seekToOrBeforeUsingPositionAtOrBefore(key, forceBeforeOnExactMatch);
@@ -229,7 +222,8 @@ public class PrefixTreeSeeker implements EncodedSeeker {
   }

   @Override
-  public int compareKey(KVComparator comparator, Cell key) {
+  public int compareKey(CellComparator comparator, Cell key) {
+    // can't optimize this, make a copy of the key
     ByteBuffer bb = getKeyDeepCopy();
     return comparator.compare(key,
         new KeyValue.KeyOnlyKeyValue(bb.array(), bb.arrayOffset(), bb.limit()));
diff --git a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/decode/PrefixTreeArrayScanner.java b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/decode/PrefixTreeArrayScanner.java
index cb7eeea..9776f90 100644
--- a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/decode/PrefixTreeArrayScanner.java
+++ b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/decode/PrefixTreeArrayScanner.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.hbase.codec.prefixtree.decode;

 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellComparator;
 import org.apache.hadoop.hbase.CellScanner;
 import org.apache.hadoop.hbase.codec.prefixtree.PrefixTreeBlockMeta;
 import org.apache.hadoop.hbase.codec.prefixtree.decode.column.ColumnReader;
@@ -61,7 +60,6 @@ public class PrefixTreeArrayScanner extends PrefixTreeCell implements CellScanne
   protected boolean nubCellsRemain;
   protected int currentCellIndex;

-
   /*********************** construct ******************************/

   // pass in blockMeta so we can initialize buffers big enough for all cells in the block
@@ -420,7 +418,7 @@ public class PrefixTreeArrayScanner extends PrefixTreeCell implements CellScanne

   protected int populateNonRowFieldsAndCompareTo(int cellNum, Cell key) {
     populateNonRowFields(cellNum);
-    return CellComparator.compare(this, key, true);
+    return comparator.compareOnlyKeyPortion(this, key);
   }

   protected void populateFirstNonRowFields() {
diff --git a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/decode/PrefixTreeCell.java b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/decode/PrefixTreeCell.java
index 97eed62..27328fe 100644
--- a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/decode/PrefixTreeCell.java
+++ b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/decode/PrefixTreeCell.java
@@ -33,6 +33,8 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
 */
@InterfaceAudience.Private
public class PrefixTreeCell implements Cell, SettableSequenceId, Comparable<Cell> {
+  // Create a reference here? Can be removed too
+  protected CellComparator comparator = CellComparator.COMPARATOR;

  /********************** static **********************/

@@ -91,19 +93,19 @@ public class PrefixTreeCell implements Cell, SettableSequenceId, Comparable
= 0) {
+    if (-(getComparator().compare(splitCell, bb.array(), bb.arrayOffset(),
+        bb.limit())) >= 0) {
      atEnd = true;
      return false;
    }
@@ -196,9 +196,9 @@ public class HalfStoreFileReader extends StoreFile.Reader {
        }
        // Check key.
        ByteBuffer k = this.delegate.getKey();
-        return this.delegate.getReader().getComparator().
-          compareFlatKey(k.array(), k.arrayOffset(), k.limit(),
-            splitkey, 0, splitkey.length) < 0;
+        return (-this.delegate.getReader().getComparator().
+            compare(splitCell, k.array(), k.arrayOffset(), k.limit()
+            )) < 0;
      }

      public org.apache.hadoop.hbase.io.hfile.HFile.Reader getReader() {
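Editor's note: several rewritten call sites, like the HalfStoreFileReader check just above, replace "compare(key, current) < 0" with "-(compare(current, key)) < 0" because only a (Cell, buffer) overload survives. The two forms agree in sign; a hedged demonstration with arbitrary cells (assumes the comparator never returns Integer.MIN_VALUE, which cannot be safely negated):

```java
// Swapping the arguments flips the sign of the result, so negating restores it.
Cell cellA = new KeyValue(Bytes.toBytes("a"), Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v"));
Cell cellB = new KeyValue(Bytes.toBytes("b"), Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v"));
int forward = CellComparator.COMPARATOR.compare(cellA, cellB);
int backward = CellComparator.COMPARATOR.compare(cellB, cellA);
assert Integer.signum(forward) == -Integer.signum(backward);
```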
- if (comparatorClassName.equals(KeyValue.COMPARATOR.getLegacyKeyComparatorName())) { - comparatorClassName = KeyValue.COMPARATOR.getClass().getName(); - } else if (comparatorClassName.equals(KeyValue.META_COMPARATOR.getLegacyKeyComparatorName())) { - comparatorClassName = KeyValue.META_COMPARATOR.getClass().getName(); - } else if (comparatorClassName.equals(KeyValue.RAW_COMPARATOR.getLegacyKeyComparatorName())) { - comparatorClassName = KeyValue.RAW_COMPARATOR.getClass().getName(); + if (comparatorClassName.equals((CellComparator.COMPARATOR) + .getLegacyKeyComparatorName())) { + comparatorClassName = CellComparator.COMPARATOR.getClass().getName(); + } else if (comparatorClassName.equals((CellComparator.META_COMPARATOR) + .getLegacyKeyComparatorName())) { + comparatorClassName = CellComparator.META_COMPARATOR.getClass().getName(); + } else if (comparatorClassName.equals (CellComparator.RAW_COMPARATOR.getLegacyKeyComparatorName())) { + comparatorClassName = CellComparator.RAW_COMPARATOR.getClass().getName(); } // if the name wasn't one of the legacy names, maybe it's a legit new kind of comparator. - return (Class<? extends KVComparator>) + return (Class<? extends CellComparator>) Class.forName(comparatorClassName); } catch (ClassNotFoundException ex) { throw new IOException(ex); } } - public static KVComparator createComparator( + public static CellComparator createComparator( String comparatorClassName) throws IOException { try { return getComparatorClass(comparatorClassName).newInstance(); @@ -595,7 +600,7 @@ public class FixedFileTrailer { } } - KVComparator createComparator() throws IOException { + CellComparator createComparator() throws IOException { expectAtLeastMajorVersion(2); return createComparator(comparatorClassName); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java index 09233a2..e854782 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java @@ -37,6 +37,7 @@ import java.util.SortedMap; import java.util.TreeMap; import java.util.concurrent.atomic.AtomicLong; +import org.apache.hadoop.hbase.util.ByteStringer; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.classification.InterfaceAudience; @@ -48,9 +49,8 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathFilter; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.KeyValue.KVComparator; import org.apache.hadoop.hbase.fs.HFileSystem; import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper; import org.apache.hadoop.hbase.io.compress.Compression; @@ -61,7 +61,6 @@ import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair; import org.apache.hadoop.hbase.protobuf.generated.HFileProtos; import org.apache.hadoop.hbase.util.BloomFilterWriter; -import org.apache.hadoop.hbase.util.ByteStringer; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.ChecksumType; import org.apache.hadoop.hbase.util.FSUtils; @@ -251,7 +250,8 @@ public class HFile { protected FileSystem fs; protected Path path; protected FSDataOutputStream ostream; - protected KVComparator comparator = KeyValue.COMPARATOR; +
protected CellComparator comparator = + CellComparator.COMPARATOR; protected InetSocketAddress[] favoredNodes; private HFileContext fileContext; @@ -274,7 +274,7 @@ public class HFile { return this; } - public WriterFactory withComparator(KVComparator comparator) { + public WriterFactory withComparator(CellComparator comparator) { Preconditions.checkNotNull(comparator); this.comparator = comparator; return this; @@ -304,7 +304,7 @@ public class HFile { } protected abstract Writer createWriter(FileSystem fs, Path path, FSDataOutputStream ostream, - KVComparator comparator, HFileContext fileContext) throws IOException; + CellComparator comparator, HFileContext fileContext) throws IOException; } /** The configuration key for HFile version to use for new files */ @@ -387,7 +387,7 @@ public class HFile { */ String getName(); - KVComparator getComparator(); + CellComparator getComparator(); HFileScanner getScanner(boolean cacheBlocks, final boolean pread, final boolean isCompaction); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java index 5b54807..f9aaac7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java @@ -36,9 +36,9 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.KeyValue.KVComparator; import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.io.HeapSize; import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; @@ -107,9 +107,10 @@ public class HFileBlockIndex { */ public static class BlockIndexReader implements HeapSize { /** Needed doing lookup on blocks. */ - private final KVComparator comparator; + private final CellComparator comparator; // Root-level data. + // TODO : Convert these to Cells (ie) KeyValue.KeyOnlyKV private byte[][] blockKeys; private long[] blockOffsets; private int[] blockDataSizes; @@ -132,13 +133,13 @@ public class HFileBlockIndex { /** A way to read {@link HFile} blocks at a given offset */ private CachingBlockReader cachingBlockReader; - public BlockIndexReader(final KVComparator c, final int treeLevel, + public BlockIndexReader(final CellComparator c, final int treeLevel, final CachingBlockReader cachingBlockReader) { this(c, treeLevel); this.cachingBlockReader = cachingBlockReader; } - public BlockIndexReader(final KVComparator c, final int treeLevel) + public BlockIndexReader(final CellComparator c, final int treeLevel) { comparator = c; searchTreeLevel = treeLevel; @@ -391,12 +392,21 @@ public class HFileBlockIndex { * * @param key * Key to find + * @param useByteComp true when RawBytesComparator needs to be used, else false * @return Offset of block containing key (between 0 and the * number of blocks - 1) or -1 if this file does not contain the * request. */ - public int rootBlockContainingKey(final byte[] key, int offset, int length) { - int pos = Bytes.binarySearch(blockKeys, key, offset, length, comparator); + // When we want to find the meta index block or bloom block for ROW bloom type + // RAW_COMPARATOR would be enough. 
For the ROW_COL bloom case we need the CellComparator. + public int rootBlockContainingKey(final byte[] key, int offset, int length, + boolean useByteComp) { + int pos; + if(useByteComp) { + pos = Bytes.binarySearch(blockKeys, key, offset, length); + } else { + pos = Bytes.binarySearch(blockKeys, key, offset, length, comparator); + } // pos is between -(blockKeys.length + 1) to blockKeys.length - 1, see // binarySearch's javadoc. @@ -507,7 +517,7 @@ public class HFileBlockIndex { * @throws IOException */ static int binarySearchNonRootIndex(Cell key, ByteBuffer nonRootIndex, - KVComparator comparator) { + CellComparator comparator) { int numEntries = nonRootIndex.getInt(0); int low = 0; @@ -596,7 +606,7 @@ public class HFileBlockIndex { * */ static int locateNonRootIndexEntry(ByteBuffer nonRootBlock, Cell key, - KVComparator comparator) { + CellComparator comparator) { int entryIndex = binarySearchNonRootIndex(key, nonRootBlock, comparator); if (entryIndex != -1) { @@ -641,7 +651,7 @@ public class HFileBlockIndex { } } } - + /** * Read in the root-level index from the given input stream. Must match * what was written into the root level by diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java index 41779a7..7463e83 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java @@ -53,7 +53,6 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.Tag; import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper; @@ -317,8 +316,7 @@ public class HFilePrettyPrinter extends Configured implements Tool { do { Cell cell = scanner.getKeyValue(); if (row != null && row.length != 0) { - int result = CellComparator.compareRows(cell.getRowArray(), cell.getRowOffset(), - cell.getRowLength(), row, 0, row.length); + int result = CellComparator.COMPARATOR.compareRows(cell, row, 0, row.length); if (result > 0) { break; } else if (result < 0) { @@ -348,7 +346,7 @@ public class HFilePrettyPrinter extends Configured implements Tool { } // check if rows are in order if (checkRow && pCell != null) { - if (CellComparator.compareRows(pCell, cell) > 0) { + if (CellComparator.COMPARATOR.compareRows(pCell, cell) > 0) { System.err.println("WARNING, previous row is greater then" + " current row\n\tfilename -> " + file + "\n\tprevious -> " + CellUtil.getCellKeyAsString(pCell) + "\n\tcurrent -> " @@ -468,7 +466,7 @@ public class HFilePrettyPrinter extends Configured implements Tool { public void collect(Cell cell) { valLen.update(cell.getValueLength()); if (prevCell != null && - KeyValue.COMPARATOR.compareRows(prevCell, cell) != 0) { + CellComparator.COMPARATOR.compareRows(prevCell, cell) != 0) { // new row collectRow(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java index 1e84e6a..04d201ce 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java @@ -33,10 +33,11 @@ import 
org.apache.hadoop.conf.Configurable; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.KeyValue.KVComparator; +import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.fs.HFileSystem; import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper; import org.apache.hadoop.hbase.io.compress.Compression; @@ -96,7 +97,7 @@ public class HFileReaderImpl implements HFile.Reader, Configurable { private int avgValueLen = -1; /** Key comparator */ - private KVComparator comparator = new KVComparator(); + private CellComparator comparator = CellComparator.COMPARATOR; /** Size of this file. */ private final long fileSize; @@ -189,7 +190,7 @@ public class HFileReaderImpl implements HFile.Reader, Configurable { dataBlockIndexReader = new HFileBlockIndex.BlockIndexReader(comparator, trailer.getNumDataIndexLevels(), this); metaBlockIndexReader = new HFileBlockIndex.BlockIndexReader( - KeyValue.RAW_COMPARATOR, 1); + CellComparator.RAW_COMPARATOR, 1); // Parse load-on-open data. @@ -355,7 +356,7 @@ public class HFileReaderImpl implements HFile.Reader, Configurable { @Override public byte[] getFirstRowKey() { byte[] firstKey = getFirstKey(); - return firstKey == null? null: KeyValue.createKeyValueFromKey(firstKey).getRow(); + return firstKey == null? null: KeyValueUtil.createKeyValueFromKey(firstKey).getRow(); } /** @@ -367,7 +368,7 @@ public class HFileReaderImpl implements HFile.Reader, Configurable { @Override public byte[] getLastRowKey() { byte[] lastKey = getLastKey(); - return lastKey == null? null: KeyValue.createKeyValueFromKey(lastKey).getRow(); + return lastKey == null? 
null: KeyValueUtil.createKeyValueFromKey(lastKey).getRow(); } /** @return number of KV entries in this HFile */ @@ -378,7 +379,7 @@ public class HFileReaderImpl implements HFile.Reader, Configurable { /** @return comparator */ @Override - public KVComparator getComparator() { + public CellComparator getComparator() { return comparator; } @@ -789,8 +790,9 @@ public class HFileReaderImpl implements HFile.Reader, Configurable { + KEY_VALUE_LEN_SIZE, currKeyLen).slice(); } - public int compareKey(KVComparator comparator, byte[] key, int offset, int length) { - return comparator.compareFlatKey(key, offset, length, blockBuffer.array(), + public int compareKey(CellComparator comparator, byte[] key, int offset, int length) { + KeyValue.KeyOnlyKeyValue keyOnlyKv = new KeyValue.KeyOnlyKeyValue(key, offset, length); + return comparator.compare(keyOnlyKv, blockBuffer.array(), blockBuffer.arrayOffset() + blockBuffer.position() + KEY_VALUE_LEN_SIZE, currKeyLen); } @@ -973,7 +975,7 @@ public class HFileReaderImpl implements HFile.Reader, Configurable { currValueLen); } - public int compareKey(KVComparator comparator, Cell key) { + public int compareKey(CellComparator comparator, Cell key) { return comparator.compareOnlyKeyPortion( key, new KeyValue.KeyOnlyKeyValue(blockBuffer.array(), blockBuffer.arrayOffset() @@ -1104,7 +1106,7 @@ public class HFileReaderImpl implements HFile.Reader, Configurable { byte[] mbname = Bytes.toBytes(metaBlockName); int block = metaBlockIndexReader.rootBlockContainingKey(mbname, - 0, mbname.length); + 0, mbname.length, true); if (block == -1) return null; long blockSize = metaBlockIndexReader.getRootBlockDataSize(block); @@ -1425,10 +1427,6 @@ public class HFileReaderImpl implements HFile.Reader, Configurable { return seeker.getKeyDeepCopy(); } - public int compareKey(KVComparator comparator, byte[] key, int offset, int length) { - return seeker.compareKey(comparator, key, offset, length); - } - @Override public ByteBuffer getValue() { assertValidSeek(); @@ -1478,7 +1476,7 @@ public class HFileReaderImpl implements HFile.Reader, Configurable { return seeker.seekToKeyInBlock(key, seekBefore); } - public int compareKey(KVComparator comparator, Cell key) { + public int compareKey(CellComparator comparator, Cell key) { return seeker.compareKey(comparator, key); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterFactory.java index 047022d..92eb760 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterFactory.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterFactory.java @@ -24,7 +24,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.KeyValue.KVComparator; +import org.apache.hadoop.hbase.CellComparator; public class HFileWriterFactory extends HFile.WriterFactory { HFileWriterFactory(Configuration conf, CacheConfig cacheConf) { @@ -33,7 +33,7 @@ public class HFileWriterFactory extends HFile.WriterFactory { @Override public HFile.Writer createWriter(FileSystem fs, Path path, FSDataOutputStream ostream, - KVComparator comparator, HFileContext context) + CellComparator comparator, HFileContext context) throws IOException { return new HFileWriterImpl(conf, cacheConf, path, ostream, comparator, context); } diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java index 0555363..55d759b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java @@ -36,8 +36,6 @@ import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.KeyValue.KVComparator; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.io.compress.Compression; import org.apache.hadoop.hbase.io.crypto.Encryption; @@ -83,7 +81,7 @@ public class HFileWriterImpl implements HFile.Writer { protected long totalUncompressedBytes = 0; /** Key comparator. Used to ensure we write in order. */ - protected final KVComparator comparator; + protected final CellComparator comparator; /** Meta block names. */ protected List metaNames = new ArrayList(); @@ -154,7 +152,7 @@ public class HFileWriterImpl implements HFile.Writer { public HFileWriterImpl(final Configuration conf, CacheConfig cacheConf, Path path, FSDataOutputStream outputStream, - KVComparator comparator, HFileContext fileContext) { + CellComparator comparator, HFileContext fileContext) { this.outputStream = outputStream; this.path = path; this.name = path != null ? path.getName() : outputStream.toString(); @@ -166,7 +164,7 @@ public class HFileWriterImpl implements HFile.Writer { this.blockEncoder = NoOpDataBlockEncoder.INSTANCE; } this.comparator = comparator != null ? comparator - : KeyValue.COMPARATOR; + : CellComparator.COMPARATOR; closeOutputStream = path != null; this.cacheConf = cacheConf; @@ -319,7 +317,7 @@ public class HFileWriterImpl implements HFile.Writer { fsBlockWriter.writeHeaderAndData(outputStream); int onDiskSize = fsBlockWriter.getOnDiskSizeWithHeader(); Cell indexEntry = - CellComparator.getMidpoint(this.comparator, lastCellOfPreviousBlock, firstCellInBlock); + CellUtil.getMidpoint(this.comparator, lastCellOfPreviousBlock, firstCellInBlock); dataBlockIndexWriter.addEntry(CellUtil.getCellKeySerializedAsKeyValueKey(indexEntry), lastDataBlockOffset, onDiskSize); totalUncompressedBytes += fsBlockWriter.getUncompressedSizeWithHeader(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java index 26ae097..2fb80c3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java @@ -37,6 +37,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; @@ -248,7 +249,7 @@ public class HFileOutputFormat2 wl.writer = new StoreFile.WriterBuilder(conf, new CacheConfig(tempConf), fs) .withOutputDir(familydir).withBloomType(bloomType) - .withComparator(KeyValue.COMPARATOR) + .withComparator(CellComparator.COMPARATOR) .withFileContext(hFileContext).build(); this.writers.put(family, wl); diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/KeyValueSerialization.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/KeyValueSerialization.java index 192afd8..ea7d33a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/KeyValueSerialization.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/KeyValueSerialization.java @@ -24,6 +24,7 @@ import java.io.InputStream; import java.io.OutputStream; import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.io.serializer.Deserializer; import org.apache.hadoop.io.serializer.Serialization; import org.apache.hadoop.io.serializer.Serializer; @@ -55,7 +56,7 @@ public class KeyValueSerialization implements Serialization<KeyValue> { @Override public KeyValue deserialize(KeyValue ignore) throws IOException { // I can't overwrite the passed in KV, not from a proto kv, not just yet. TODO - return KeyValue.create(this.dis); + return KeyValueUtil.create(this.dis); } @Override @@ -79,7 +80,7 @@ public class KeyValueSerialization implements Serialization<KeyValue> { @Override public void serialize(KeyValue kv) throws IOException { - KeyValue.write(kv, this.dos); + KeyValueUtil.write(kv, this.dos); } } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/KeyValueSortReducer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/KeyValueSortReducer.java index 69e6438..52dd468 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/KeyValueSortReducer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/KeyValueSortReducer.java @@ -22,6 +22,7 @@ import java.util.TreeSet; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.mapreduce.Reducer; @@ -39,7 +40,7 @@ public class KeyValueSortReducer extends Reducer<ImmutableBytesWritable, KeyValue, ImmutableBytesWritable, KeyValue> { protected void reduce(ImmutableBytesWritable row, java.lang.Iterable<KeyValue> kvs, org.apache.hadoop.mapreduce.Reducer<ImmutableBytesWritable, KeyValue, ImmutableBytesWritable, KeyValue>.Context context) throws java.io.IOException, InterruptedException { - TreeSet<KeyValue> map = new TreeSet<KeyValue>(KeyValue.COMPARATOR); + TreeSet<KeyValue> map = new TreeSet<KeyValue>(CellComparator.COMPARATOR); for (KeyValue kv: kvs) { try { map.add(kv.clone()); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/PutSortReducer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/PutSortReducer.java index db9e585..d77779b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/PutSortReducer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/PutSortReducer.java @@ -25,6 +25,7 @@ import java.util.TreeSet; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.client.Put; @@ -58,7 +59,7 @@ public class PutSortReducer extends "putsortreducer.row.threshold", 1L * (1<<30)); Iterator<Put> iter = puts.iterator(); while (iter.hasNext()) { - TreeSet<KeyValue> map = new TreeSet<KeyValue>(KeyValue.COMPARATOR); + TreeSet<KeyValue> map = new TreeSet<KeyValue>(CellComparator.COMPARATOR); long curSize = 0; // stop at the end or the RAM threshold while (iter.hasNext() && curSize < threshold) { diff --git
a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/SimpleTotalOrderPartitioner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/SimpleTotalOrderPartitioner.java index 1c65068..52bc8df 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/SimpleTotalOrderPartitioner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/SimpleTotalOrderPartitioner.java @@ -108,7 +108,7 @@ implements Configurable { } } int pos = Bytes.binarySearch(this.splits, key.get(), key.getOffset(), - key.getLength(), Bytes.BYTES_RAWCOMPARATOR); + key.getLength()); // Below code is from hfile index search. if (pos < 0) { pos++; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TextSortReducer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TextSortReducer.java index b3981a1..0aaeeb0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TextSortReducer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TextSortReducer.java @@ -28,6 +28,7 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.Tag; @@ -145,7 +146,7 @@ public class TextSortReducer extends "reducer.row.threshold", 1L * (1<<30)); Iterator iter = lines.iterator(); while (iter.hasNext()) { - Set kvs = new TreeSet(KeyValue.COMPARATOR); + Set kvs = new TreeSet(CellComparator.COMPARATOR); long curSize = 0; // stop at the end or the RAM threshold while (iter.hasNext() && curSize < threshold) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellSkipListSet.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellSkipListSet.java index 4c3ab50..e9941b3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellSkipListSet.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellSkipListSet.java @@ -27,7 +27,7 @@ import java.util.concurrent.ConcurrentNavigableMap; import java.util.concurrent.ConcurrentSkipListMap; import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.classification.InterfaceAudience; /** @@ -48,7 +48,7 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience; public class CellSkipListSet implements NavigableSet { private final ConcurrentNavigableMap delegatee; - CellSkipListSet(final KeyValue.KVComparator c) { + CellSkipListSet(final CellComparator c) { this.delegatee = new ConcurrentSkipListMap(c); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java index 3da0c0b..f8a1182 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java @@ -34,6 +34,7 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellComparator; import 
org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HConstants; @@ -84,7 +85,7 @@ public class DefaultMemStore implements MemStore { // Snapshot of memstore. Made for flusher. volatile CellSkipListSet snapshot; - final KeyValue.KVComparator comparator; + final CellComparator comparator; // Used to track own heapSize final AtomicLong size; @@ -104,7 +105,7 @@ public class DefaultMemStore implements MemStore { * Default constructor. Used for tests. */ public DefaultMemStore() { - this(HBaseConfiguration.create(), KeyValue.COMPARATOR); + this(HBaseConfiguration.create(), CellComparator.COMPARATOR); } /** @@ -112,7 +113,7 @@ public class DefaultMemStore implements MemStore { * @param c Comparator */ public DefaultMemStore(final Configuration conf, - final KeyValue.KVComparator c) { + final CellComparator c) { this.conf = conf; this.comparator = c; this.cellSet = new CellSkipListSet(c); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreEngine.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreEngine.java index 3c1345d..519edde 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreEngine.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreEngine.java @@ -24,8 +24,8 @@ import java.util.List; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.HBaseInterfaceAudience; -import org.apache.hadoop.hbase.KeyValue.KVComparator; import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext; import org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy; import org.apache.hadoop.hbase.regionserver.compactions.RatioBasedCompactionPolicy; @@ -63,7 +63,7 @@ public class DefaultStoreEngine extends StoreEngine< @Override protected void createComponents( - Configuration conf, Store store, KVComparator kvComparator) throws IOException { + Configuration conf, Store store, CellComparator kvComparator) throws IOException { String className = conf.get(DEFAULT_COMPACTOR_CLASS_KEY, DEFAULT_COMPACTOR_CLASS.getName()); try { compactor = ReflectionUtils.instantiateWithCustomCtor(className, diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreFileManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreFileManager.java index 8305b99..2761e0a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreFileManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreFileManager.java @@ -29,8 +29,8 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.KeyValue.KVComparator; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.regionserver.compactions.CompactionConfiguration; @@ -45,7 +45,7 @@ import com.google.common.collect.Lists; class DefaultStoreFileManager implements StoreFileManager { static final Log LOG = LogFactory.getLog(DefaultStoreFileManager.class); - private final KVComparator kvComparator; + private final 
CellComparator kvComparator; private final CompactionConfiguration comConf; private final int blockingFileCount; @@ -55,7 +55,7 @@ class DefaultStoreFileManager implements StoreFileManager { */ private volatile ImmutableList<StoreFile> storefiles = null; - public DefaultStoreFileManager(KVComparator kvComparator, Configuration conf, + public DefaultStoreFileManager(CellComparator kvComparator, Configuration conf, CompactionConfiguration comConf) { this.kvComparator = kvComparator; this.comConf = comConf; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/GetClosestRowBeforeTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/GetClosestRowBeforeTracker.java index 4d22c0e..3686834 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/GetClosestRowBeforeTracker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/GetClosestRowBeforeTracker.java @@ -29,7 +29,6 @@ import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.KeyValue.KVComparator; import org.apache.hadoop.hbase.util.Bytes; /** @@ -45,7 +44,7 @@ class GetClosestRowBeforeTracker { private final long now; private final long oldestUnexpiredTs; private Cell candidate = null; - private final KVComparator kvcomparator; + private final CellComparator cellComparator; // Flag for whether we're doing getclosest on a metaregion. private final boolean metaregion; // Offset and length into targetkey demarking table name (if in a metaregion). @@ -62,7 +61,7 @@ class GetClosestRowBeforeTracker { * @param ttl Time to live in ms for this Store * @param metaregion True if this is hbase:meta or -ROOT- region. */ - GetClosestRowBeforeTracker(final KVComparator c, final KeyValue kv, + GetClosestRowBeforeTracker(final CellComparator c, final KeyValue kv, final long ttl, final boolean metaregion) { super(); this.metaregion = metaregion; @@ -72,13 +71,13 @@ class GetClosestRowBeforeTracker { this.rowoffset = kv.getRowOffset(); int l = -1; if (metaregion) { - l = KeyValue.getDelimiter(kv.getRowArray(), rowoffset, kv.getRowLength(), + l = Bytes.searchDelimiterIndex(kv.getRowArray(), rowoffset, kv.getRowLength(), HConstants.DELIMITER) - this.rowoffset; } this.tablenamePlusDelimiterLength = metaregion? l + 1: -1; this.now = System.currentTimeMillis(); this.oldestUnexpiredTs = now - ttl; - this.kvcomparator = c; + this.cellComparator = c; this.deletes = new TreeMap<Cell, NavigableSet<Cell>>(new CellComparator.RowComparator()); } @@ -89,7 +88,7 @@ class GetClosestRowBeforeTracker { private void addDelete(final Cell kv) { NavigableSet<Cell> rowdeletes = this.deletes.get(kv); if (rowdeletes == null) { - rowdeletes = new TreeSet<Cell>(this.kvcomparator); + rowdeletes = new TreeSet<Cell>(this.cellComparator); this.deletes.put(kv, rowdeletes); } rowdeletes.add(kv); @@ -109,8 +108,8 @@ class GetClosestRowBeforeTracker { boolean isBetterCandidate(final Cell contender) { return this.candidate == null || - (this.kvcomparator.compareRows(this.candidate, contender) < 0 && - this.kvcomparator.compareRows(contender, this.targetkey) <= 0); + (this.cellComparator.compareRows(this.candidate, contender) < 0 && - this.cellComparator.compareRows(contender, this.targetkey) <= 0); } /* @@ -231,7 +230,7 @@ class GetClosestRowBeforeTracker { * @return True if we went too far, past the target key.
*/ boolean isTooFar(final Cell kv, final Cell firstOnRow) { - return this.kvcomparator.compareRows(kv, firstOnRow) > 0; + return this.cellComparator.compareRows(kv, firstOnRow) > 0; } boolean isTargetTable(final Cell kv) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java index e082698..ccfca5b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java @@ -70,6 +70,7 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.CompoundConfiguration; @@ -289,7 +290,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi private final HRegionFileSystem fs; protected final Configuration conf; private final Configuration baseConf; - private final KeyValue.KVComparator comparator; + private final CellComparator comparator; private final int rowLockWaitDuration; static final int DEFAULT_ROWLOCK_WAIT_DURATION = 30000; @@ -1646,7 +1647,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi /** * @return KeyValue Comparator */ - public KeyValue.KVComparator getComparator() { + public CellComparator getComparator() { return this.comparator; } @@ -5666,6 +5667,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi protected boolean isStopRow(byte[] currentRow, int offset, short length) { return currentRow == null || (stopRow != null && + // TODO : currentRow can be tracked as cell rather than byte[] comparator.compareRows(stopRow, 0, stopRow.length, currentRow, offset, length) <= isScan); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java index 014ec2c..80b1637 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java @@ -588,8 +588,7 @@ public class HRegionFileSystem { byte[] lastKey = f.createReader().getLastKey(); // If lastKey is null means storefile is empty. if (lastKey == null) return null; - if (f.getReader().getComparator().compareFlatKey(splitKey.getBuffer(), - splitKey.getKeyOffset(), splitKey.getKeyLength(), lastKey, 0, lastKey.length) > 0) { + if (f.getReader().getComparator().compare(splitKey, lastKey, 0, lastKey.length) > 0) { return null; } } else { @@ -598,8 +597,7 @@ public class HRegionFileSystem { byte[] firstKey = f.createReader().getFirstKey(); // If firstKey is null means storefile is empty. 
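The two compare() calls in the HRegionFileSystem hunks here replace the old flat-key comparison with a Cell-versus-serialized-key comparison. Below is a sketch of the resulting bounds test, assuming splitKey is the split point as a Cell and firstKey/lastKey are the serialized flat keys returned by the reader; the helper is illustrative only, not part of the patch.

    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.CellComparator;

    final class SplitBoundsCheck {
      // A top reference holds keys >= splitKey, so it is empty when splitKey
      // sorts after the file's last key; a bottom reference holds keys
      // < splitKey, so it is empty when splitKey sorts before the first key.
      static boolean referenceHasData(CellComparator c, Cell splitKey,
          byte[] firstKey, byte[] lastKey, boolean top) {
        return top
            ? c.compare(splitKey, lastKey, 0, lastKey.length) <= 0
            : c.compare(splitKey, firstKey, 0, firstKey.length) >= 0;
      }
    }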
if (firstKey == null) return null; - if (f.getReader().getComparator().compareFlatKey(splitKey.getBuffer(), - splitKey.getKeyOffset(), splitKey.getKeyLength(), firstKey, 0, firstKey.length) < 0) { + if (f.getReader().getComparator().compare(splitKey, firstKey, 0, firstKey.length) < 0) { return null; } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java index 042deed..3e3f39c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java @@ -55,6 +55,7 @@ import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.Tag; import org.apache.hadoop.hbase.TagType; @@ -178,7 +179,7 @@ public class HStore implements Store { private int bytesPerChecksum; // Comparing KeyValues - private final KeyValue.KVComparator comparator; + private final CellComparator comparator; final StoreEngine storeEngine; @@ -245,7 +246,7 @@ public class HStore implements Store { scanInfo = new ScanInfo(family, ttl, timeToPurgeDeletes, this.comparator); String className = conf.get(MEMSTORE_CLASS_NAME, DefaultMemStore.class.getName()); this.memstore = ReflectionUtils.instantiateWithCustomCtor(className, new Class[] { - Configuration.class, KeyValue.KVComparator.class }, new Object[] { conf, this.comparator }); + Configuration.class, CellComparator.class }, new Object[] { conf, this.comparator }); this.offPeakHours = OffPeakHours.getInstance(conf); // Setting up cache configuration for this family @@ -723,7 +724,7 @@ public class HStore implements Store { Preconditions.checkState(firstKey != null, "First key can not be null"); byte[] lk = reader.getLastKey(); Preconditions.checkState(lk != null, "Last key can not be null"); - byte[] lastKey = KeyValue.createKeyValueFromKey(lk).getRow(); + byte[] lastKey = KeyValueUtil.createKeyValueFromKey(lk).getRow(); LOG.debug("HFile bounds: first=" + Bytes.toStringBinary(firstKey) + " last=" + Bytes.toStringBinary(lastKey)); @@ -752,7 +753,7 @@ public class HStore implements Store { do { Cell cell = scanner.getKeyValue(); if (prevCell != null) { - if (CellComparator.compareRows(prevCell, cell) > 0) { + if (comparator.compareRows(prevCell, cell) > 0) { throw new InvalidHFileException("Previous row is greater than" + " current row: path=" + srcPath + " previous=" + CellUtil.getCellKeyAsString(prevCell) + " current=" @@ -1850,9 +1851,9 @@ public class HStore implements Store { // TODO: Cache these keys rather than make each time? 
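The HStore hunk continuing below swaps KeyValue.createKeyValueFromKey for the KeyValueUtil variant. The underlying pattern is sketched here on the assumption that getFirstKey()/getLastKey() return only the serialized key portion of a KeyValue, which is why a KeyValue has to be rebuilt around them before row comparison; the helper name is illustrative.

    import org.apache.hadoop.hbase.CellComparator;
    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.KeyValueUtil;

    final class FlatKeyRowCompare {
      // Wrap the reader's serialized first/last keys as KeyValues so that the
      // row portion can be compared with CellComparator#compareRows.
      static boolean targetRowWithinFile(byte[] fk, byte[] lk, KeyValue target) {
        KeyValue firstKV = KeyValueUtil.createKeyValueFromKey(fk, 0, fk.length);
        KeyValue lastKV = KeyValueUtil.createKeyValueFromKey(lk, 0, lk.length);
        return CellComparator.COMPARATOR.compareRows(firstKV, target) <= 0
            && CellComparator.COMPARATOR.compareRows(lastKV, target) >= 0;
      }
    }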
byte [] fk = r.getFirstKey(); if (fk == null) return false; - KeyValue firstKV = KeyValue.createKeyValueFromKey(fk, 0, fk.length); + KeyValue firstKV = KeyValueUtil.createKeyValueFromKey(fk, 0, fk.length); byte [] lk = r.getLastKey(); - KeyValue lastKV = KeyValue.createKeyValueFromKey(lk, 0, lk.length); + KeyValue lastKV = KeyValueUtil.createKeyValueFromKey(lk, 0, lk.length); KeyValue firstOnRow = state.getTargetKey(); if (this.comparator.compareRows(lastKV, firstOnRow) < 0) { // If last key in file is not of the target table, no candidates in this @@ -2309,7 +2310,7 @@ public class HStore implements Store { } @Override - public KeyValue.KVComparator getComparator() { + public CellComparator getComparator() { return comparator; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueHeap.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueHeap.java index 761267f..43a57f4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueHeap.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueHeap.java @@ -25,7 +25,7 @@ import java.util.List; import java.util.PriorityQueue; import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.KeyValue.KVComparator; +import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.regionserver.ScannerContext.NextState; @@ -67,7 +67,7 @@ public class KeyValueHeap extends NonReversedNonLazyKeyValueScanner * @param comparator */ public KeyValueHeap(List scanners, - KVComparator comparator) throws IOException { + CellComparator comparator) throws IOException { this(scanners, new KVScannerComparator(comparator)); } @@ -164,12 +164,12 @@ public class KeyValueHeap extends NonReversedNonLazyKeyValueScanner } protected static class KVScannerComparator implements Comparator { - protected KVComparator kvComparator; + protected CellComparator kvComparator; /** * Constructor * @param kvComparator */ - public KVScannerComparator(KVComparator kvComparator) { + public KVScannerComparator(CellComparator kvComparator) { this.kvComparator = kvComparator; } public int compare(KeyValueScanner left, KeyValueScanner right) { @@ -202,7 +202,7 @@ public class KeyValueHeap extends NonReversedNonLazyKeyValueScanner /** * @return KVComparator */ - public KVComparator getComparator() { + public CellComparator getComparator() { return this.kvComparator; } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReversedKeyValueHeap.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReversedKeyValueHeap.java index c7ce180..5167b4e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReversedKeyValueHeap.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReversedKeyValueHeap.java @@ -24,8 +24,8 @@ import java.util.List; import org.apache.commons.lang.NotImplementedException; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; -import org.apache.hadoop.hbase.KeyValue.KVComparator; /** * ReversedKeyValueHeap is used for supporting reversed scanning. 
Compared with @@ -43,7 +43,7 @@ public class ReversedKeyValueHeap extends KeyValueHeap { * @throws IOException */ public ReversedKeyValueHeap(List<? extends KeyValueScanner> scanners, - KVComparator comparator) throws IOException { + CellComparator comparator) throws IOException { super(scanners, new ReversedKVScannerComparator(comparator)); } @@ -77,9 +77,7 @@ public class ReversedKeyValueHeap extends KeyValueHeap { KeyValueScanner scanner; while ((scanner = heap.poll()) != null) { Cell topKey = scanner.peek(); - if (comparator.getComparator().compareRows(topKey.getRowArray(), - topKey.getRowOffset(), topKey.getRowLength(), seekKey.getRowArray(), - seekKey.getRowOffset(), seekKey.getRowLength()) < 0) { + if (comparator.getComparator().compareRows(topKey, seekKey) < 0) { // Row of Top KeyValue is before Seek row. heap.add(scanner); current = pollRealKV(); @@ -162,7 +160,7 @@ public class ReversedKeyValueHeap extends KeyValueHeap { * Constructor * @param kvComparator */ - public ReversedKVScannerComparator(KVComparator kvComparator) { + public ReversedKVScannerComparator(CellComparator kvComparator) { super(kvComparator); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReversedRegionScannerImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReversedRegionScannerImpl.java index feda699..b3472ec 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReversedRegionScannerImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReversedRegionScannerImpl.java @@ -58,6 +58,8 @@ class ReversedRegionScannerImpl extends RegionScannerImpl { @Override protected boolean isStopRow(byte[] currentRow, int offset, short length) { return currentRow == null + // TODO: Once ByteBuffer-backed Cells arrive, track currentRow as a Cell + // rather than a byte[] || (super.stopRow != null && region.getComparator().compareRows( stopRow, 0, stopRow.length, currentRow, offset, length) >= super.isScan); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReversedStoreScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReversedStoreScanner.java index e319f90..d198d7b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReversedStoreScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReversedStoreScanner.java @@ -24,10 +24,9 @@ import java.util.NavigableSet; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.KeyValue.KVComparator; import org.apache.hadoop.hbase.client.Scan; /** @@ -63,7 +62,7 @@ class ReversedStoreScanner extends StoreScanner implements KeyValueScanner { @Override protected void resetKVHeap(List<? extends KeyValueScanner> scanners, - KVComparator comparator) throws IOException { + CellComparator comparator) throws IOException { // Combine all seeked scanners with a heap heap = new ReversedKeyValueHeap(scanners, comparator); } @@ -100,11 +99,11 @@ class ReversedStoreScanner extends StoreScanner implements KeyValueScanner { @Override protected void checkScanOrder(Cell prevKV, Cell kv, - KeyValue.KVComparator comparator) throws IOException { + CellComparator comparator) throws IOException { // Check that the heap gives us KVs in an increasing order for same row and // decreasing order for different
rows. assert prevKV == null || comparator == null || comparator.compareRows(kv, prevKV) < 0 - || (comparator.matchingRows(kv, prevKV) && comparator.compare(kv, + || (CellUtil.matchingRows(kv, prevKV) && comparator.compare(kv, prevKV) >= 0) : "Key " + prevKV + " followed by a " + "error order key " + kv + " in cf " + store + " in reversed scan"; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanInfo.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanInfo.java index a8b314e..83a1bdd 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanInfo.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanInfo.java @@ -19,9 +19,9 @@ package org.apache.hadoop.hbase.regionserver; import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.KeepDeletedCells; -import org.apache.hadoop.hbase.KeyValue.KVComparator; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.ClassSize; @@ -36,7 +36,7 @@ public class ScanInfo { private long ttl; private KeepDeletedCells keepDeletedCells; private long timeToPurgeDeletes; - private KVComparator comparator; + private CellComparator comparator; public static final long FIXED_OVERHEAD = ClassSize.align(ClassSize.OBJECT + (2 * ClassSize.REFERENCE) + (2 * Bytes.SIZEOF_INT) @@ -50,7 +50,7 @@ public class ScanInfo { * @param comparator The store's comparator */ public ScanInfo(final HColumnDescriptor family, final long ttl, final long timeToPurgeDeletes, - final KVComparator comparator) { + final CellComparator comparator) { this(family.getName(), family.getMinVersions(), family.getMaxVersions(), ttl, family .getKeepDeletedCells(), timeToPurgeDeletes, comparator); } @@ -67,7 +67,7 @@ public class ScanInfo { */ public ScanInfo(final byte[] family, final int minVersions, final int maxVersions, final long ttl, final KeepDeletedCells keepDeletedCells, final long timeToPurgeDeletes, - final KVComparator comparator) { + final CellComparator comparator) { this.family = family; this.minVersions = minVersions; this.maxVersions = maxVersions; @@ -101,7 +101,7 @@ public class ScanInfo { return timeToPurgeDeletes; } - public KVComparator getComparator() { + public CellComparator getComparator() { return comparator; } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanQueryMatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanQueryMatcher.java index 032b4ce..b3007e2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanQueryMatcher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanQueryMatcher.java @@ -25,10 +25,10 @@ import java.util.NavigableSet; import org.apache.hadoop.hbase.KeyValue.Type; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeepDeletedCells; -import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.filter.Filter; @@ -86,7 +86,7 @@ public class ScanQueryMatcher { private final Cell startKey; /** Row comparator for the region this query is for */ - private final KeyValue.KVComparator 
rowComparator; + private final CellComparator rowComparator; /* row is not private for tests */ /** Row the query is on */ @@ -278,8 +278,7 @@ public class ScanQueryMatcher { if (filter != null && filter.filterAllRemaining()) { return MatchCode.DONE_SCAN; } - int ret = this.rowComparator.compareRows(row, this.rowOffset, this.rowLength, - cell.getRowArray(), cell.getRowOffset(), cell.getRowLength()); + int ret = (-this.rowComparator.compareRows(cell, row, this.rowOffset, this.rowLength)); if (!this.isReversed) { if (ret <= -1) { return MatchCode.DONE; @@ -495,16 +494,14 @@ public class ScanQueryMatcher { public boolean moreRowsMayExistAfter(Cell kv) { if (this.isReversed) { - if (rowComparator.compareRows(kv.getRowArray(), kv.getRowOffset(), - kv.getRowLength(), stopRow, 0, stopRow.length) <= 0) { + if (rowComparator.compareRows(kv, stopRow, 0, stopRow.length) <= 0) { return false; } else { return true; } } if (!Bytes.equals(stopRow , HConstants.EMPTY_END_ROW) && - rowComparator.compareRows(kv.getRowArray(),kv.getRowOffset(), - kv.getRowLength(), stopRow, 0, stopRow.length) >= 0) { + rowComparator.compareRows(kv, stopRow, 0, stopRow.length) >= 0) { // KV >= STOPROW // then NO there is nothing left. return false; @@ -584,32 +581,25 @@ public class ScanQueryMatcher { * @return result of the compare between the indexed key and the key portion of the passed cell */ public int compareKeyForNextRow(Cell nextIndexed, Cell kv) { - return rowComparator.compareKey(nextIndexed, - kv.getRowArray(), kv.getRowOffset(), kv.getRowLength(), - null, 0, 0, - null, 0, 0, - HConstants.OLDEST_TIMESTAMP, Type.Minimum.getCode()); + return rowComparator.compareKeyBasedOnColHint(nextIndexed, kv, 0, 0, null, 0, 0, + HConstants.OLDEST_TIMESTAMP, Type.Minimum.getCode()); } /** * @param nextIndexed the key of the next entry in the block index (if any) - * @param kv The Cell we're using to calculate the seek key + * @param currentCell The Cell we're using to calculate the seek key * @return result of the compare between the indexed key and the key portion of the passed cell */ - public int compareKeyForNextColumn(Cell nextIndexed, Cell kv) { + public int compareKeyForNextColumn(Cell nextIndexed, Cell currentCell) { ColumnCount nextColumn = columns.getColumnHint(); if (nextColumn == null) { - return rowComparator.compareKey(nextIndexed, - kv.getRowArray(), kv.getRowOffset(), kv.getRowLength(), - kv.getFamilyArray(), kv.getFamilyOffset(), kv.getFamilyLength(), - kv.getQualifierArray(), kv.getQualifierOffset(), kv.getQualifierLength(), - HConstants.OLDEST_TIMESTAMP, Type.Minimum.getCode()); + return rowComparator.compareKeyBasedOnColHint(nextIndexed, currentCell, 0, 0, null, 0, 0, + HConstants.OLDEST_TIMESTAMP, Type.Minimum.getCode()); } else { - return rowComparator.compareKey(nextIndexed, - kv.getRowArray(), kv.getRowOffset(), kv.getRowLength(), - kv.getFamilyArray(), kv.getFamilyOffset(), kv.getFamilyLength(), - nextColumn.getBuffer(), nextColumn.getOffset(), nextColumn.getLength(), - HConstants.LATEST_TIMESTAMP, Type.Maximum.getCode()); + return rowComparator.compareKeyBasedOnColHint(nextIndexed, currentCell, + currentCell.getFamilyOffset(), currentCell.getFamilyLength(), nextColumn.getBuffer(), + nextColumn.getOffset(), nextColumn.getLength(), HConstants.LATEST_TIMESTAMP, + Type.Maximum.getCode()); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java index a77fc0e..3b169ad 100644 --- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java @@ -27,10 +27,10 @@ import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.conf.PropagatingConfigurationObserver; @@ -59,7 +59,7 @@ public interface Store extends HeapSize, StoreConfigInformation, PropagatingConf int NO_PRIORITY = Integer.MIN_VALUE; // General Accessors - KeyValue.KVComparator getComparator(); + CellComparator getComparator(); Collection getStorefiles(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreEngine.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreEngine.java index 519767c..2164031 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreEngine.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreEngine.java @@ -24,7 +24,7 @@ import java.util.List; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.KeyValue.KVComparator; +import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext; import org.apache.hadoop.hbase.regionserver.compactions.CompactionPolicy; import org.apache.hadoop.hbase.regionserver.compactions.Compactor; @@ -97,10 +97,10 @@ public abstract class StoreEngine create( - Store store, Configuration conf, KVComparator kvComparator) throws IOException { + Store store, Configuration conf, CellComparator kvComparator) throws IOException { String className = conf.get(STORE_ENGINE_CLASS_KEY, DEFAULT_STORE_ENGINE_CLASS.getName()); try { StoreEngine se = ReflectionUtils.instantiateWithCustomCtor( diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java index 345dd9b..4650bdf 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java @@ -41,7 +41,7 @@ import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HDFSBlocksDistribution; import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.KeyValue.KVComparator; +import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.client.Scan; @@ -538,7 +538,7 @@ public class StoreFile { private final CacheConfig cacheConf; private final FileSystem fs; - private KeyValue.KVComparator comparator = KeyValue.COMPARATOR; + private CellComparator comparator = CellComparator.COMPARATOR; private BloomType bloomType = BloomType.NONE; private long maxKeyCount = 0; private Path dir; @@ -585,7 +585,7 @@ public class StoreFile { return this; } - public WriterBuilder withComparator(KeyValue.KVComparator comparator) { + public 
WriterBuilder withComparator(CellComparator comparator) { Preconditions.checkNotNull(comparator); this.comparator = comparator; return this; @@ -637,7 +637,7 @@ public class StoreFile { } if (comparator == null) { - comparator = KeyValue.COMPARATOR; + comparator = CellComparator.COMPARATOR; } return new Writer(fs, filePath, conf, cacheConf, comparator, bloomType, maxKeyCount, favoredNodes, fileContext); @@ -670,7 +670,7 @@ public class StoreFile { * @return The split point row, or null if splitting is not possible, or reader is null. */ @SuppressWarnings("deprecation") - byte[] getFileSplitPoint(KVComparator comparator) throws IOException { + byte[] getFileSplitPoint(CellComparator comparator) throws IOException { if (this.reader == null) { LOG.warn("Storefile " + this + " Reader is null; cannot get split point"); return null; @@ -680,11 +680,11 @@ public class StoreFile { // the row we want to split on as midkey. byte [] midkey = this.reader.midkey(); if (midkey != null) { - KeyValue mk = KeyValue.createKeyValueFromKey(midkey, 0, midkey.length); + KeyValue mk = KeyValueUtil.createKeyValueFromKey(midkey, 0, midkey.length); byte [] fk = this.reader.getFirstKey(); - KeyValue firstKey = KeyValue.createKeyValueFromKey(fk, 0, fk.length); + KeyValue firstKey = KeyValueUtil.createKeyValueFromKey(fk, 0, fk.length); byte [] lk = this.reader.getLastKey(); - KeyValue lastKey = KeyValue.createKeyValueFromKey(lk, 0, lk.length); + KeyValue lastKey = KeyValueUtil.createKeyValueFromKey(lk, 0, lk.length); // if the midkey is the same as the first or last keys, we cannot (ever) split this region. if (comparator.compareRows(mk, firstKey) == 0 || comparator.compareRows(mk, lastKey) == 0) { if (LOG.isDebugEnabled()) { @@ -707,7 +707,7 @@ public class StoreFile { private final BloomType bloomType; private byte[] lastBloomKey; private int lastBloomKeyOffset, lastBloomKeyLen; - private KVComparator kvComparator; + private CellComparator kvComparator; private Cell lastCell = null; private long earliestPutTs = HConstants.LATEST_TIMESTAMP; private Cell lastDeleteFamilyCell = null; @@ -726,6 +726,7 @@ public class StoreFile { boolean isTimeRangeTrackerSet = false; protected HFile.Writer writer; + private KeyValue.KeyOnlyKeyValue lastBloomKeyOnlyKV = null; /** * Creates an HFile.Writer that also write helpful meta data. @@ -743,7 +744,7 @@ public class StoreFile { private Writer(FileSystem fs, Path path, final Configuration conf, CacheConfig cacheConf, - final KVComparator comparator, BloomType bloomType, long maxKeys, + final CellComparator comparator, BloomType bloomType, long maxKeys, InetSocketAddress[] favoredNodes, HFileContext fileContext) throws IOException { writer = HFile.getWriterFactory(conf, cacheConf) @@ -761,6 +762,9 @@ public class StoreFile { if (generalBloomFilterWriter != null) { this.bloomType = bloomType; + if(this.bloomType == BloomType.ROWCOL) { + lastBloomKeyOnlyKV = new KeyValue.KeyOnlyKeyValue(); + } if (LOG.isTraceEnabled()) LOG.trace("Bloom filter type for " + path + ": " + this.bloomType + ", " + generalBloomFilterWriter.getClass().getSimpleName()); } else { @@ -838,10 +842,10 @@ public class StoreFile { if (this.lastCell != null) { switch(bloomType) { case ROW: - newKey = ! kvComparator.matchingRows(cell, lastCell); + newKey = ! CellUtil.matchingRows(cell, lastCell); break; case ROWCOL: - newKey = ! kvComparator.matchingRowColumn(cell, lastCell); + newKey = ! 
CellUtil.matchingRowColumn(cell, lastCell); break; case NONE: newKey = false; @@ -884,19 +888,27 @@ public class StoreFile { " (ROW or ROWCOL expected)"); } generalBloomFilterWriter.add(bloomKey, bloomKeyOffset, bloomKeyLen); - if (lastBloomKey != null - && generalBloomFilterWriter.getComparator().compareFlatKey(bloomKey, - bloomKeyOffset, bloomKeyLen, lastBloomKey, - lastBloomKeyOffset, lastBloomKeyLen) <= 0) { - throw new IOException("Non-increasing Bloom keys: " - + Bytes.toStringBinary(bloomKey, bloomKeyOffset, bloomKeyLen) - + " after " - + Bytes.toStringBinary(lastBloomKey, lastBloomKeyOffset, - lastBloomKeyLen)); + if (lastBloomKey != null) { + boolean res = false; + if (bloomType == BloomType.ROW) { + res = (Bytes.BYTES_RAWCOMPARATOR.compare(bloomKey, bloomKeyOffset, bloomKeyLen, + lastBloomKey, lastBloomKeyOffset, lastBloomKeyLen) <= 0); + } else { + res = (-(CellComparator.COMPARATOR.compare(lastBloomKeyOnlyKV, bloomKey, + bloomKeyOffset, bloomKeyLen)) <= 0); + } + if (res) { + throw new IOException("Non-increasing Bloom keys: " + + Bytes.toStringBinary(bloomKey, bloomKeyOffset, bloomKeyLen) + " after " + + Bytes.toStringBinary(lastBloomKey, lastBloomKeyOffset, lastBloomKeyLen)); + } } lastBloomKey = bloomKey; lastBloomKeyOffset = bloomKeyOffset; lastBloomKeyLen = bloomKeyLen; + if(bloomType == BloomType.ROWCOL) { + lastBloomKeyOnlyKV.setKey(bloomKey, bloomKeyOffset, bloomKeyLen); + } this.lastCell = cell; } } @@ -913,7 +925,7 @@ public class StoreFile { if (null != this.deleteFamilyBloomFilterWriter) { boolean newKey = true; if (lastDeleteFamilyCell != null) { - newKey = !kvComparator.matchingRows(cell, lastDeleteFamilyCell); + newKey = !CellUtil.matchingRows(cell, lastDeleteFamilyCell); } if (newKey) { this.deleteFamilyBloomFilterWriter.add(cell.getRowArray(), @@ -1030,6 +1042,7 @@ public class StoreFile { private byte[] lastBloomKey; private long deleteFamilyCnt = -1; private boolean bulkLoadResult = false; + private KeyValue.KeyOnlyKeyValue lastBloomKeyOnlyKV = null; public Reader(FileSystem fs, Path path, CacheConfig cacheConf, Configuration conf) throws IOException { @@ -1050,7 +1063,7 @@ public class StoreFile { this.reader = null; } - public KVComparator getComparator() { + public CellComparator getComparator() { return reader.getComparator(); } @@ -1204,7 +1217,7 @@ public class StoreFile { if (!bloomFilter.supportsAutoLoading()) { return true; } - return bloomFilter.contains(row, rowOffset, rowLen, null); + return bloomFilter.contains(row, rowOffset, rowLen, null, true); } catch (IllegalArgumentException e) { LOG.error("Bad Delete Family bloom filter data -- proceeding without", e); @@ -1280,9 +1293,15 @@ public class StoreFile { // Whether the primary Bloom key is greater than the last Bloom key // from the file info. For row-column Bloom filters this is not yet // a sufficient condition to return false. - boolean keyIsAfterLast = lastBloomKey != null - && bloomFilter.getComparator().compareFlatKey(key, lastBloomKey) > 0; - + boolean keyIsAfterLast = (lastBloomKey != null); + if(keyIsAfterLast) { + if(bloomFilterType == BloomType.ROW) { + keyIsAfterLast = (Bytes.BYTES_RAWCOMPARATOR.compare(key, lastBloomKey) > 0); + } else { + keyIsAfterLast = (-CellComparator.COMPARATOR.compare(lastBloomKeyOnlyKV, key, 0, + key.length)) > 0; + } + } if (bloomFilterType == BloomType.ROWCOL) { // Since a Row Delete is essentially a DeleteFamily applied to all // columns, a file might be skipped if using row+col Bloom filter. 
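Both the writer-side ordering check and the reader-side keyIsAfterLast test above follow the same two-branch pattern: ROW bloom keys stay plain row byte[]s compared with Bytes.BYTES_RAWCOMPARATOR, while ROWCOL bloom keys are flat KeyValue keys wrapped once in a reusable KeyValue.KeyOnlyKeyValue so CellComparator can read them. Because CellComparator only exposes compare(Cell, byte[], offset, length) in that argument order, the patch negates the result to get the reversed comparison. A minimal standalone sketch of that pattern, using only APIs that appear in this patch (the sample rows and keys are invented):

```java
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.util.Bytes;

public class BloomKeyOrderSketch {
  public static void main(String[] args) {
    // Two flat keys (serialized KeyValue key portions) in increasing order.
    byte[] lastKey = new KeyValue(Bytes.toBytes("r1"), Bytes.toBytes("f"),
        Bytes.toBytes("q"), 1L, Bytes.toBytes("v")).getKey();
    byte[] nextKey = new KeyValue(Bytes.toBytes("r2"), Bytes.toBytes("f"),
        Bytes.toBytes("q"), 1L, Bytes.toBytes("v")).getKey();

    // ROWCOL path: wrap the previous flat key once and reuse the wrapper.
    KeyValue.KeyOnlyKeyValue lastBloomKeyOnlyKV = new KeyValue.KeyOnlyKeyValue();
    lastBloomKeyOnlyKV.setKey(lastKey, 0, lastKey.length);
    // compare(last, next) < 0 means the keys are still increasing; the
    // negation stands in for the unavailable compare(nextKey, lastCell) order.
    boolean nonIncreasing =
        -(CellComparator.COMPARATOR.compare(lastBloomKeyOnlyKV, nextKey, 0,
            nextKey.length)) <= 0;

    // ROW path: bloom keys are plain row bytes, so a raw byte compare suffices.
    byte[] lastRow = Bytes.toBytes("r1");
    byte[] nextRow = Bytes.toBytes("r2");
    boolean rowNonIncreasing = Bytes.BYTES_RAWCOMPARATOR.compare(nextRow, 0,
        nextRow.length, lastRow, 0, lastRow.length) <= 0;

    System.out.println(nonIncreasing + " " + rowNonIncreasing); // false false
  }
}
```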
@@ -1292,18 +1311,18 @@ public class StoreFile { null, 0, 0); if (keyIsAfterLast - && bloomFilter.getComparator().compareFlatKey(rowBloomKey, - lastBloomKey) > 0) { + && (-CellComparator.COMPARATOR.compare(lastBloomKeyOnlyKV, rowBloomKey, 0, + rowBloomKey.length)) > 0) { exists = false; } else { exists = - bloomFilter.contains(key, 0, key.length, bloom) || + bloomFilter.contains(key, 0, key.length, bloom, false) || bloomFilter.contains(rowBloomKey, 0, rowBloomKey.length, - bloom); + bloom, false); } } else { exists = !keyIsAfterLast - && bloomFilter.contains(key, 0, key.length, bloom); + && bloomFilter.contains(key, 0, key.length, bloom, true); } return exists; @@ -1340,12 +1359,12 @@ public class StoreFile { KeyValue largestScanKeyValue = scan.isReversed() ? KeyValueUtil .createLastOnRow(scan.getStartRow()) : KeyValueUtil.createLastOnRow(scan .getStopRow()); - boolean nonOverLapping = (getComparator().compareFlatKey( - this.getFirstKey(), largestScanKeyValue.getKey()) > 0 && !Bytes + boolean nonOverLapping = ((-getComparator().compare(largestScanKeyValue, + this.getFirstKey(), 0, this.getFirstKey().length)) > 0 && !Bytes .equals(scan.isReversed() ? scan.getStartRow() : scan.getStopRow(), HConstants.EMPTY_END_ROW)) - || getComparator().compareFlatKey(this.getLastKey(), - smallestScanKeyValue.getKey()) < 0; + || (-getComparator().compare(smallestScanKeyValue, this.getLastKey(), 0, + this.getLastKey().length)) < 0; return !nonOverLapping; } @@ -1358,6 +1377,9 @@ public class StoreFile { } lastBloomKey = fi.get(LAST_BLOOM_KEY); + if(bloomFilterType == BloomType.ROWCOL) { + lastBloomKeyOnlyKV = new KeyValue.KeyOnlyKeyValue(lastBloomKey, 0, lastBloomKey.length); + } byte[] cnt = fi.get(DELETE_FAMILY_COUNT); if (cnt != null) { deleteFamilyCnt = Bytes.toLong(cnt); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java index 22fd46e..db144c8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java @@ -30,6 +30,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; @@ -215,9 +216,7 @@ public class StoreFileScanner implements KeyValueScanner { hfs.next(); setCurrentCell(hfs.getKeyValue()); if (this.stopSkippingKVsIfNextRow - && getComparator().compareRows(cur.getRowArray(), cur.getRowOffset(), - cur.getRowLength(), startKV.getRowArray(), startKV.getRowOffset(), - startKV.getRowLength()) > 0) { + && getComparator().compareRows(cur, startKV) > 0) { return false; } } @@ -369,7 +368,7 @@ public class StoreFileScanner implements KeyValueScanner { return reader; } - KeyValue.KVComparator getComparator() { + CellComparator getComparator() { return reader.getComparator(); } @@ -476,9 +475,7 @@ public class StoreFileScanner implements KeyValueScanner { public boolean backwardSeek(Cell key) throws IOException { seek(key); if (cur == null - || getComparator().compareRows(cur.getRowArray(), cur.getRowOffset(), - cur.getRowLength(), key.getRowArray(), key.getRowOffset(), - key.getRowLength()) > 0) { + || getComparator().compareRows(cur, key) > 0) 
{ return seekToPreviousRow(key); } return true; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java index 665ed46..9ce19bb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java @@ -31,11 +31,11 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.KeyValue.KVComparator; import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.client.IsolationLevel; @@ -351,7 +351,7 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner } protected void resetKVHeap(List scanners, - KVComparator comparator) throws IOException { + CellComparator comparator) throws IOException { // Combine all seeked scanners with a heap heap = new KeyValueHeap(scanners, comparator); } @@ -500,7 +500,7 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner Cell cell; // Only do a sanity-check if store and comparator are available. - KeyValue.KVComparator comparator = + CellComparator comparator = store != null ? store.getComparator() : null; int count = 0; @@ -750,7 +750,7 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner * @throws IOException */ protected void checkScanOrder(Cell prevKV, Cell kv, - KeyValue.KVComparator comparator) throws IOException { + CellComparator comparator) throws IOException { // Check that the heap gives us KVs in an increasing order. assert prevKV == null || comparator == null || comparator.compare(prevKV, kv) <= 0 : "Key " + prevKV diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeMultiFileWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeMultiFileWriter.java index 5b4c4db..c31a091 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeMultiFileWriter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeMultiFileWriter.java @@ -27,7 +27,8 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.KeyValue.KVComparator; +import org.apache.hadoop.hbase.CellComparator; +import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.regionserver.compactions.Compactor; import org.apache.hadoop.hbase.util.Bytes; @@ -40,7 +41,7 @@ public abstract class StripeMultiFileWriter implements Compactor.CellSink { /** Factory that is used to produce single StoreFile.Writer-s */ protected WriterFactory writerFactory; - protected KVComparator comparator; + protected CellComparator comparator; protected List existingWriters; protected List boundaries; @@ -60,7 +61,7 @@ public abstract class StripeMultiFileWriter implements Compactor.CellSink { * @param factory Factory used to produce individual file writers. * @param comparator Comparator used to compare rows. 
*/ - public void init(StoreScanner sourceScanner, WriterFactory factory, KVComparator comparator) + public void init(StoreScanner sourceScanner, WriterFactory factory, CellComparator comparator) throws IOException { this.writerFactory = factory; this.sourceScanner = sourceScanner; @@ -111,16 +112,15 @@ public abstract class StripeMultiFileWriter implements Compactor.CellSink { /** * Subclasses can call this method to make sure the first KV is within multi-writer range. * @param left The left boundary of the writer. - * @param row The row to check. - * @param rowOffset Offset for row. - * @param rowLength Length for row. + * @param cell The cell whose row has to be checked. */ protected void sanityCheckLeft( - byte[] left, byte[] row, int rowOffset, int rowLength) throws IOException { + byte[] left, Cell cell) throws IOException { if (StripeStoreFileManager.OPEN_KEY != left && - comparator.compareRows(row, rowOffset, rowLength, left, 0, left.length) < 0) { + comparator.compareRows(cell, left, 0, left.length) < 0) { String error = "The first row is lower than the left boundary of [" + Bytes.toString(left) - + "]: [" + Bytes.toString(row, rowOffset, rowLength) + "]"; + + "]: [" + Bytes.toString(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength()) + + "]"; LOG.error(error); throw new IOException(error); } @@ -129,16 +129,15 @@ public abstract class StripeMultiFileWriter implements Compactor.CellSink { /** * Subclasses can call this method to make sure the last KV is within multi-writer range. * @param right The right boundary of the writer. - * @param row The row to check. - * @param rowOffset Offset for row. - * @param rowLength Length for row. + * @param cell The cell whose row has to be checked. */ protected void sanityCheckRight( - byte[] right, byte[] row, int rowOffset, int rowLength) throws IOException { + byte[] right, Cell cell) throws IOException { if (StripeStoreFileManager.OPEN_KEY != right && - comparator.compareRows(row, rowOffset, rowLength, right, 0, right.length) >= 0) { + comparator.compareRows(cell, right, 0, right.length) >= 0) { String error = "The last row is higher or equal than the right boundary of [" - + Bytes.toString(right) + "]: [" + Bytes.toString(row, rowOffset, rowLength) + "]"; + + Bytes.toString(right) + "]: [" + + Bytes.toString(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength()) + "]"; LOG.error(error); throw new IOException(error); } @@ -197,7 +196,7 @@ public abstract class StripeMultiFileWriter implements Compactor.CellSink { if (currentWriter == null && existingWriters.isEmpty()) { // First append ever, do a sanity check.
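The reworked sanity checks above take the Cell itself and lean on the compareRows(Cell, byte[], offset, length) overload, so callers no longer copy the row out of the cell before checking it against a stripe boundary. A small sketch of that boundary test under the same assumptions (the row values and boundaries are invented):

```java
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.util.Bytes;

public class StripeBoundarySketch {
  public static void main(String[] args) {
    Cell first = new KeyValue(Bytes.toBytes("ccc"), Bytes.toBytes("f"),
        Bytes.toBytes("q"), 1L, Bytes.toBytes("v"));
    byte[] left = Bytes.toBytes("bbb");
    byte[] right = Bytes.toBytes("mmm");

    // Row comparisons run directly against the cell; no row copy is needed.
    boolean belowLeft =
        CellComparator.COMPARATOR.compareRows(first, left, 0, left.length) < 0;
    boolean atOrAboveRight =
        CellComparator.COMPARATOR.compareRows(first, right, 0, right.length) >= 0;

    System.out.println(belowLeft + " " + atOrAboveRight); // false false
  }
}
```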
sanityCheckLeft(this.boundaries.get(0), - cell.getRowArray(), cell.getRowOffset(), cell.getRowLength()); + cell); } prepareWriterFor(cell); currentWriter.append(cell); @@ -207,7 +206,7 @@ public abstract class StripeMultiFileWriter implements Compactor.CellSink { private boolean isCellAfterCurrentWriter(Cell cell) { return ((currentWriterEndKey != StripeStoreFileManager.OPEN_KEY) && - (comparator.compareRows(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength(), + (comparator.compareRows(cell, currentWriterEndKey, 0, currentWriterEndKey.length) >= 0)); } @@ -219,7 +218,7 @@ public abstract class StripeMultiFileWriter implements Compactor.CellSink { } if (lastCell != null) { sanityCheckRight(boundaries.get(boundaries.size() - 1), - lastCell.getRowArray(), lastCell.getRowOffset(), lastCell.getRowLength()); + lastCell); } } @@ -326,10 +325,10 @@ public abstract class StripeMultiFileWriter implements Compactor.CellSink { boolean doCreateWriter = false; if (currentWriter == null) { // First append ever, do a sanity check. - sanityCheckLeft(left, cell.getRowArray(), cell.getRowOffset(), cell.getRowLength()); + sanityCheckLeft(left, cell); doCreateWriter = true; } else if (lastRowInCurrentWriter != null - && !comparator.matchingRows(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength(), + && !CellUtil.matchingRow(cell, lastRowInCurrentWriter, 0, lastRowInCurrentWriter.length)) { if (LOG.isDebugEnabled()) { LOG.debug("Stopping to use a writer after [" + Bytes.toString(lastRowInCurrentWriter) @@ -382,7 +381,7 @@ public abstract class StripeMultiFileWriter implements Compactor.CellSink { } if (lastCell != null) { sanityCheckRight( - right, lastCell.getRowArray(), lastCell.getRowOffset(), lastCell.getRowLength()); + right, lastCell); } // When expired stripes were going to be merged into one, and if no writer was created during diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreEngine.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreEngine.java index b910527..68b654e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreEngine.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreEngine.java @@ -26,8 +26,8 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.HBaseInterfaceAudience; -import org.apache.hadoop.hbase.KeyValue.KVComparator; import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext; import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest; import org.apache.hadoop.hbase.regionserver.compactions.StripeCompactionPolicy; @@ -57,7 +57,7 @@ public class StripeStoreEngine extends StoreEngine data; - final KeyValue.KVComparator comparator; + final CellComparator comparator; private Iterator iter; private Cell current; public CollectionBackedScanner(SortedSet set) { - this(set, KeyValue.COMPARATOR); + this(set, CellComparator.COMPARATOR); } public CollectionBackedScanner(SortedSet set, - KeyValue.KVComparator comparator) { + CellComparator comparator) { this.comparator = comparator; data = set; init(); } public CollectionBackedScanner(List list) { - this(list, KeyValue.COMPARATOR); + this(list, CellComparator.COMPARATOR); } public CollectionBackedScanner(List list, - KeyValue.KVComparator comparator) { 
+ CellComparator comparator) { Collections.sort(list, comparator); this.comparator = comparator; data = list; init(); } - public CollectionBackedScanner(KeyValue.KVComparator comparator, + public CollectionBackedScanner(CellComparator comparator, Cell... array) { this.comparator = comparator; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompoundBloomFilter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompoundBloomFilter.java index beda805..f933315 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompoundBloomFilter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompoundBloomFilter.java @@ -24,7 +24,6 @@ import java.io.IOException; import java.nio.ByteBuffer; import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.KeyValue.KVComparator; import org.apache.hadoop.hbase.io.hfile.BlockType; import org.apache.hadoop.hbase.io.hfile.FixedFileTrailer; import org.apache.hadoop.hbase.io.hfile.HFile; @@ -84,13 +83,13 @@ public class CompoundBloomFilter extends CompoundBloomFilterBase @Override public boolean contains(byte[] key, int keyOffset, int keyLength, - ByteBuffer bloom) { + ByteBuffer bloom, boolean useByteComp) { // We try to store the result in this variable so we can update stats for // testing, but when an error happens, we log a message and return. boolean result; int block = index.rootBlockContainingKey(key, keyOffset, - keyLength); + keyLength, useByteComp); if (block < 0) { result = false; // This key is not in the file. } else { @@ -131,11 +130,6 @@ public class CompoundBloomFilter extends CompoundBloomFilterBase return numChunks; } - @Override - public KVComparator getComparator() { - return comparator; - } - public void enableTestingStats() { numQueriesPerChunk = new long[numChunks]; numPositivesPerChunk = new long[numChunks]; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompoundBloomFilterBase.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompoundBloomFilterBase.java index af9fa00..48544c8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompoundBloomFilterBase.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompoundBloomFilterBase.java @@ -20,8 +20,9 @@ package org.apache.hadoop.hbase.util; import org.apache.hadoop.hbase.classification.InterfaceAudience; + +import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.KeyValue.KVComparator; import org.apache.hadoop.hbase.KeyValueUtil; @InterfaceAudience.Private @@ -47,13 +48,12 @@ public class CompoundBloomFilterBase implements BloomFilterBase { protected long totalKeyCount; protected long totalByteSize; protected long totalMaxKeys; + // TODO : This can be removed when we remove RawBytesComparator + protected CellComparator comparator; /** Hash function type to use, as defined in {@link Hash} */ protected int hashType; - /** Comparator used to compare Bloom filter keys */ - protected KVComparator comparator; - @Override public long getMaxKeys() { return totalMaxKeys; @@ -89,9 +89,4 @@ public class CompoundBloomFilterBase implements BloomFilterBase { return kv.getKey(); } - @Override - public KVComparator getComparator() { - return comparator; - } - } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompoundBloomFilterWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompoundBloomFilterWriter.java index d436a98..a018cee 100644 --- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompoundBloomFilterWriter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompoundBloomFilterWriter.java @@ -28,7 +28,7 @@ import java.util.Queue; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.KeyValue.KVComparator; +import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.io.hfile.BlockType; import org.apache.hadoop.hbase.io.hfile.HFileBlockIndex; import org.apache.hadoop.hbase.io.hfile.InlineBlockWriter; @@ -89,7 +89,7 @@ public class CompoundBloomFilterWriter extends CompoundBloomFilterBase */ public CompoundBloomFilterWriter(int chunkByteSizeHint, float errorRate, int hashType, int maxFold, boolean cacheOnWrite, - KVComparator comparator) { + CellComparator comparator) { chunkByteSize = ByteBloomFilter.computeFoldableByteSize( chunkByteSizeHint * 8L, maxFold); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompressionTest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompressionTest.java index a9cc1c6..0157ea7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompressionTest.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompressionTest.java @@ -139,7 +139,7 @@ public class CompressionTest { scanner.seekTo(); // position to the start of file // Scanner does not do Cells yet. Do below for now till fixed. cc = scanner.getKeyValue(); - if (CellComparator.compareRows(c, cc) != 0) { + if (CellComparator.COMPARATOR.compareRows(c, cc) != 0) { throw new Exception("Read back incorrect result: " + c.toString() + " vs " + cc.toString()); } } finally { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java index f8fdd96..65b51f0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java @@ -86,6 +86,7 @@ import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.MasterNotRunningException; import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.RegionLocations; @@ -857,9 +858,9 @@ public class HBaseFsck extends Configured implements Closeable { CacheConfig cacheConf = new CacheConfig(getConf()); hf = HFile.createReader(fs, hfile.getPath(), cacheConf, getConf()); hf.loadFileInfo(); - KeyValue startKv = KeyValue.createKeyValueFromKey(hf.getFirstKey()); + KeyValue startKv = KeyValueUtil.createKeyValueFromKey(hf.getFirstKey()); start = startKv.getRow(); - KeyValue endKv = KeyValue.createKeyValueFromKey(hf.getLastKey()); + KeyValue endKv = KeyValueUtil.createKeyValueFromKey(hf.getLastKey()); end = endKv.getRow(); } catch (IOException ioe) { LOG.warn("Problem reading orphan file " + hfile + ", skipping"); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java index ff79569..8df61a5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java @@ -3426,7 +3426,7 
@@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { int i; for (i = 0; i < minLen - && KeyValue.COMPARATOR.compare(expected.get(i), actual.get(i)) == 0; + && CellComparator.COMPARATOR.compare(expected.get(i), actual.get(i)) == 0; ++i) {} if (additionalMsg == null) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/MetaMockingUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/MetaMockingUtil.java index 4a26a23..42e2811 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/MetaMockingUtil.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/MetaMockingUtil.java @@ -100,7 +100,7 @@ public class MetaMockingUtil { } //important: sort the kvs so that binary search work - Collections.sort(kvs, KeyValue.META_COMPARATOR); + Collections.sort(kvs, CellComparator.META_COMPARATOR); return Result.create(kvs); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestSerialization.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestSerialization.java index c29a460..1075528 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestSerialization.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestSerialization.java @@ -69,12 +69,12 @@ public class TestSerialization { KeyValue kv = new KeyValue(row, fam, qf, ts, val); ByteArrayOutputStream baos = new ByteArrayOutputStream(); DataOutputStream dos = new DataOutputStream(baos); - long l = KeyValue.write(kv, dos); + long l = KeyValueUtil.write(kv, dos); dos.close(); byte [] mb = baos.toByteArray(); ByteArrayInputStream bais = new ByteArrayInputStream(mb); DataInputStream dis = new DataInputStream(bais); - KeyValue deserializedKv = KeyValue.create(dis); + KeyValue deserializedKv = KeyValueUtil.create(dis); assertTrue(Bytes.equals(kv.getBuffer(), deserializedKv.getBuffer())); assertEquals(kv.getOffset(), deserializedKv.getOffset()); assertEquals(kv.getLength(), deserializedKv.getLength()); @@ -104,7 +104,7 @@ public class TestSerialization { DataInputStream dis = new DataInputStream(bais); try { - KeyValue.create(dis); + KeyValueUtil.create(dis); assertTrue(kv_0.equals(kv_1)); } catch (Exception e) { fail("Unexpected Exception" + e.getMessage()); @@ -113,7 +113,7 @@ public class TestSerialization { // length -1 try { // even if we have a good kv now in dis we will just pass length with -1 for simplicity - KeyValue.create(-1, dis); + KeyValueUtil.create(-1, dis); fail("Expected corrupt stream"); } catch (Exception e) { assertEquals("Failed read -1 bytes, stream corrupt?", e.getMessage()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestResult.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestResult.java index aa25dd3..6c76ce4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestResult.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestResult.java @@ -31,6 +31,7 @@ import junit.framework.TestCase; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.KeyValue; @@ -70,7 +71,7 @@ public class TestResult extends TestCase { */ public void testResultAsCellScanner() throws IOException { Cell [] cells = genKVs(row, family, value, 1, 10); - Arrays.sort(cells, KeyValue.COMPARATOR); + Arrays.sort(cells, CellComparator.COMPARATOR); Result r = 
Result.create(cells); assertSame(r, cells); // Assert I run over same result multiple times. @@ -92,7 +93,7 @@ public class TestResult extends TestCase { public void testBasicGetColumn() throws Exception { KeyValue [] kvs = genKVs(row, family, value, 1, 100); - Arrays.sort(kvs, KeyValue.COMPARATOR); + Arrays.sort(kvs, CellComparator.COMPARATOR); Result r = Result.create(kvs); @@ -114,7 +115,7 @@ public class TestResult extends TestCase { System.arraycopy(kvs1, 0, kvs, 0, kvs1.length); System.arraycopy(kvs2, 0, kvs, kvs1.length, kvs2.length); - Arrays.sort(kvs, KeyValue.COMPARATOR); + Arrays.sort(kvs, CellComparator.COMPARATOR); Result r = Result.create(kvs); for (int i = 0; i < 100; ++i) { @@ -131,7 +132,7 @@ public class TestResult extends TestCase { public void testBasicGetValue() throws Exception { KeyValue [] kvs = genKVs(row, family, value, 1, 100); - Arrays.sort(kvs, KeyValue.COMPARATOR); + Arrays.sort(kvs, CellComparator.COMPARATOR); Result r = Result.create(kvs); @@ -151,7 +152,7 @@ public class TestResult extends TestCase { System.arraycopy(kvs1, 0, kvs, 0, kvs1.length); System.arraycopy(kvs2, 0, kvs, kvs1.length, kvs2.length); - Arrays.sort(kvs, KeyValue.COMPARATOR); + Arrays.sort(kvs, CellComparator.COMPARATOR); Result r = Result.create(kvs); for (int i = 0; i < 100; ++i) { @@ -165,7 +166,7 @@ public class TestResult extends TestCase { public void testBasicLoadValue() throws Exception { KeyValue [] kvs = genKVs(row, family, value, 1, 100); - Arrays.sort(kvs, KeyValue.COMPARATOR); + Arrays.sort(kvs, CellComparator.COMPARATOR); Result r = Result.create(kvs); ByteBuffer loadValueBuffer = ByteBuffer.allocate(1024); @@ -190,7 +191,7 @@ public class TestResult extends TestCase { System.arraycopy(kvs1, 0, kvs, 0, kvs1.length); System.arraycopy(kvs2, 0, kvs, kvs1.length, kvs2.length); - Arrays.sort(kvs, KeyValue.COMPARATOR); + Arrays.sort(kvs, CellComparator.COMPARATOR); ByteBuffer loadValueBuffer = ByteBuffer.allocate(1024); @@ -279,7 +280,7 @@ public class TestResult extends TestCase { KeyValue [] kvs = genKVs(Bytes.toBytes(rowSB.toString()), family, Bytes.toBytes(valueSB.toString()), 1, n); - Arrays.sort(kvs, KeyValue.COMPARATOR); + Arrays.sort(kvs, CellComparator.COMPARATOR); ByteBuffer loadValueBuffer = ByteBuffer.allocate(1024); Result r = Result.create(kvs); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/codec/TestCellMessageCodec.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/codec/TestCellMessageCodec.java index b51de80..73fbd99 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/codec/TestCellMessageCodec.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/codec/TestCellMessageCodec.java @@ -31,6 +31,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; +import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.testclassification.MiscTests; import org.apache.hadoop.hbase.testclassification.SmallTests; @@ -107,13 +108,13 @@ public class TestCellMessageCodec { Codec.Decoder decoder = cmc.getDecoder(dis); assertTrue(decoder.advance()); Cell c = decoder.current(); - assertTrue(CellComparator.equals(c, kv1)); + assertTrue(CellUtil.equals(c, kv1)); assertTrue(decoder.advance()); c = decoder.current(); - assertTrue(CellComparator.equals(c, kv2)); + assertTrue(CellUtil.equals(c, kv2)); assertTrue(decoder.advance()); c = decoder.current(); - 
assertTrue(CellComparator.equals(c, kv3)); + assertTrue(CellUtil.equals(c, kv3)); assertFalse(decoder.advance()); dis.close(); assertEquals(offset, cis.getCount()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestDependentColumnFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestDependentColumnFilter.java index add549a..defbaab 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestDependentColumnFilter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestDependentColumnFilter.java @@ -31,6 +31,7 @@ import java.util.List; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HRegionInfo; @@ -152,7 +153,7 @@ public class TestDependentColumnFilter { for (boolean done = true; done; i++) { done = scanner.next(results); Arrays.sort(results.toArray(new KeyValue[results.size()]), - KeyValue.COMPARATOR); + CellComparator.COMPARATOR); LOG.info("counter=" + i + ", " + results); if (results.isEmpty()) break; cells += results.size(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilter.java index 5fcf64e..02c4c9d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilter.java @@ -31,6 +31,7 @@ import java.util.List; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HColumnDescriptor; @@ -1619,7 +1620,7 @@ public class TestFilter { for (boolean done = true; done; i++) { done = scanner.next(results); Arrays.sort(results.toArray(new KeyValue[results.size()]), - KeyValue.COMPARATOR); + CellComparator.COMPARATOR); LOG.info("counter=" + i + ", " + results); if (results.isEmpty()) break; assertTrue("Scanned too many rows! Only expected " + expectedRows + @@ -1641,7 +1642,7 @@ public class TestFilter { for (boolean done = true; done; i++) { done = scanner.next(results); Arrays.sort(results.toArray(new KeyValue[results.size()]), - KeyValue.COMPARATOR); + CellComparator.COMPARATOR); LOG.info("counter=" + i + ", " + results); if(results.isEmpty()) break; assertTrue("Scanned too many rows! Only expected " + expectedRows + @@ -1663,7 +1664,7 @@ public class TestFilter { for (boolean done = true; done; row++) { done = scanner.next(results); Arrays.sort(results.toArray(new KeyValue[results.size()]), - KeyValue.COMPARATOR); + CellComparator.COMPARATOR); if(results.isEmpty()) break; assertTrue("Scanned too many keys! Only expected " + kvs.length + " total but already scanned " + (results.size() + idx) + @@ -1694,7 +1695,7 @@ public class TestFilter { for (boolean more = true; more; row++) { more = scanner.next(results); Arrays.sort(results.toArray(new KeyValue[results.size()]), - KeyValue.COMPARATOR); + CellComparator.COMPARATOR); if(results.isEmpty()) break; assertTrue("Scanned too many keys! 
Only expected " + kvs.length + " total but already scanned " + (results.size() + idx) + diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterList.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterList.java index 759435b..0e01484 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterList.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterList.java @@ -29,6 +29,7 @@ import java.util.Arrays; import java.util.List; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.exceptions.DeserializationException; @@ -458,7 +459,7 @@ public class TestFilterList { // Should take the min if given two hints FilterList filterList = new FilterList(Operator.MUST_PASS_ONE, Arrays.asList(new Filter [] { filterMinHint, filterMaxHint } )); - assertEquals(0, KeyValue.COMPARATOR.compare(filterList.getNextCellHint(null), + assertEquals(0, CellComparator.COMPARATOR.compare(filterList.getNextCellHint(null), minKeyValue)); // Should have no hint if any filter has no hint @@ -473,7 +474,7 @@ public class TestFilterList { // Should give max hint if its the only one filterList = new FilterList(Operator.MUST_PASS_ONE, Arrays.asList(new Filter [] { filterMaxHint, filterMaxHint } )); - assertEquals(0, KeyValue.COMPARATOR.compare(filterList.getNextCellHint(null), + assertEquals(0, CellComparator.COMPARATOR.compare(filterList.getNextCellHint(null), maxKeyValue)); // MUST PASS ALL @@ -482,13 +483,13 @@ public class TestFilterList { filterList = new FilterList(Operator.MUST_PASS_ALL, Arrays.asList(new Filter [] { filterMinHint, filterMaxHint } )); filterList.filterKeyValue(null); - assertEquals(0, KeyValue.COMPARATOR.compare(filterList.getNextCellHint(null), + assertEquals(0, CellComparator.COMPARATOR.compare(filterList.getNextCellHint(null), minKeyValue)); filterList = new FilterList(Operator.MUST_PASS_ALL, Arrays.asList(new Filter [] { filterMaxHint, filterMinHint } )); filterList.filterKeyValue(null); - assertEquals(0, KeyValue.COMPARATOR.compare(filterList.getNextCellHint(null), + assertEquals(0, CellComparator.COMPARATOR.compare(filterList.getNextCellHint(null), maxKeyValue)); // Should have first hint even if a filter has no hint @@ -496,17 +497,17 @@ public class TestFilterList { Arrays.asList( new Filter [] { filterNoHint, filterMinHint, filterMaxHint } )); filterList.filterKeyValue(null); - assertEquals(0, KeyValue.COMPARATOR.compare(filterList.getNextCellHint(null), + assertEquals(0, CellComparator.COMPARATOR.compare(filterList.getNextCellHint(null), minKeyValue)); filterList = new FilterList(Operator.MUST_PASS_ALL, Arrays.asList(new Filter [] { filterNoHint, filterMaxHint } )); filterList.filterKeyValue(null); - assertEquals(0, KeyValue.COMPARATOR.compare(filterList.getNextCellHint(null), + assertEquals(0, CellComparator.COMPARATOR.compare(filterList.getNextCellHint(null), maxKeyValue)); filterList = new FilterList(Operator.MUST_PASS_ALL, Arrays.asList(new Filter [] { filterNoHint, filterMinHint } )); filterList.filterKeyValue(null); - assertEquals(0, KeyValue.COMPARATOR.compare(filterList.getNextCellHint(null), + assertEquals(0, CellComparator.COMPARATOR.compare(filterList.getNextCellHint(null), minKeyValue)); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestSingleColumnValueExcludeFilter.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestSingleColumnValueExcludeFilter.java index 7aa298c..e23a394 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestSingleColumnValueExcludeFilter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestSingleColumnValueExcludeFilter.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.filter; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp; import org.apache.hadoop.hbase.testclassification.FilterTests; @@ -67,8 +68,8 @@ public class TestSingleColumnValueExcludeFilter { filter.filterRowCells(kvs); assertEquals("resultSize", kvs.size(), 2); - assertTrue("leftKV1", KeyValue.COMPARATOR.compare(kvs.get(0), kv) == 0); - assertTrue("leftKV2", KeyValue.COMPARATOR.compare(kvs.get(1), kv) == 0); + assertTrue("leftKV1", CellComparator.COMPARATOR.compare(kvs.get(0), kv) == 0); + assertTrue("leftKV2", CellComparator.COMPARATOR.compare(kvs.get(1), kv) == 0); assertFalse("allRemainingWhenMatch", filter.filterAllRemaining()); // A 'mismatch' situation diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHalfStoreFileReader.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHalfStoreFileReader.java index 18595a8..7e2f1c0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHalfStoreFileReader.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHalfStoreFileReader.java @@ -31,6 +31,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; @@ -101,7 +102,7 @@ public class TestHalfStoreFileReader { HFile.Reader r = HFile.createReader(fs, p, cacheConf, conf); r.loadFileInfo(); byte [] midkey = r.midkey(); - KeyValue midKV = KeyValue.createKeyValueFromKey(midkey); + KeyValue midKV = KeyValueUtil.createKeyValueFromKey(midkey); midkey = midKV.getRow(); //System.out.println("midkey: " + midKV + " or: " + Bytes.toStringBinary(midkey)); @@ -167,7 +168,7 @@ public class TestHalfStoreFileReader { HFile.Reader r = HFile.createReader(fs, p, cacheConf, conf); r.loadFileInfo(); byte[] midkey = r.midkey(); - KeyValue midKV = KeyValue.createKeyValueFromKey(midkey); + KeyValue midKV = KeyValueUtil.createKeyValueFromKey(midkey); midkey = midKV.getRow(); Reference bottom = new Reference(midkey, Reference.Range.bottom); @@ -176,7 +177,7 @@ public class TestHalfStoreFileReader { // Ugly code to get the item before the midkey KeyValue beforeMidKey = null; for (KeyValue item : items) { - if (KeyValue.COMPARATOR.compare(item, midKV) >= 0) { + if (CellComparator.COMPARATOR.compare(item, midKV) >= 0) { break; } beforeMidKey = item; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestDataBlockEncoders.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestDataBlockEncoders.java index cabb67f..fbe8164 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestDataBlockEncoders.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestDataBlockEncoders.java @@ -30,6 +30,7 @@ import java.util.Collection; import java.util.List; import java.util.Random; +import 
org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; @@ -185,7 +186,7 @@ public class TestDataBlockEncoders { .withIncludesTags(includesTags) .withCompression(Compression.Algorithm.NONE) .build(); - DataBlockEncoder.EncodedSeeker seeker = encoder.createSeeker(KeyValue.COMPARATOR, + DataBlockEncoder.EncodedSeeker seeker = encoder.createSeeker(CellComparator.COMPARATOR, encoder.newDataBlockDecodingContext(meta)); seeker.setCurrentBuffer(encodedBuffer); encodedSeekers.add(seeker); @@ -251,7 +252,7 @@ public class TestDataBlockEncoders { .withIncludesTags(includesTags) .withCompression(Compression.Algorithm.NONE) .build(); - DataBlockEncoder.EncodedSeeker seeker = encoder.createSeeker(KeyValue.COMPARATOR, + DataBlockEncoder.EncodedSeeker seeker = encoder.createSeeker(CellComparator.COMPARATOR, encoder.newDataBlockDecodingContext(meta)); seeker.setCurrentBuffer(encodedBuffer); int i = 0; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestPrefixTreeEncoding.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestPrefixTreeEncoding.java index ee664bd..2d478a4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestPrefixTreeEncoding.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestPrefixTreeEncoding.java @@ -36,6 +36,7 @@ import java.util.concurrent.ConcurrentSkipListSet; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.Tag; @@ -70,7 +71,7 @@ public class TestPrefixTreeEncoding { private int numBatchesWritten = 0; private ConcurrentSkipListSet kvset = new ConcurrentSkipListSet( - KeyValue.COMPARATOR); + CellComparator.COMPARATOR); private static boolean formatRowNum = false; @@ -110,7 +111,7 @@ public class TestPrefixTreeEncoding { DataOutputStream userDataStream = new DataOutputStream(baosInMemory); generateFixedTestData(kvset, batchId, false, includesTag, encoder, blkEncodingCtx, userDataStream); - EncodedSeeker seeker = encoder.createSeeker(KeyValue.COMPARATOR, + EncodedSeeker seeker = encoder.createSeeker(CellComparator.COMPARATOR, encoder.newDataBlockDecodingContext(meta)); byte[] onDiskBytes = baosInMemory.toByteArray(); ByteBuffer readBuffer = ByteBuffer.wrap(onDiskBytes, DataBlockEncoding.ID_SIZE, @@ -157,7 +158,7 @@ public class TestPrefixTreeEncoding { DataBlockEncoding.PREFIX_TREE, new byte[0], meta); generateRandomTestData(kvset, numBatchesWritten++, includesTag, encoder, blkEncodingCtx, userDataStream); - EncodedSeeker seeker = encoder.createSeeker(KeyValue.COMPARATOR, + EncodedSeeker seeker = encoder.createSeeker(CellComparator.COMPARATOR, encoder.newDataBlockDecodingContext(meta)); byte[] onDiskBytes = baosInMemory.toByteArray(); ByteBuffer readBuffer = ByteBuffer.wrap(onDiskBytes, DataBlockEncoding.ID_SIZE, @@ -167,7 +168,7 @@ public class TestPrefixTreeEncoding { do { Cell currentKV = seeker.getKeyValue(); System.out.println(currentKV); - if (previousKV != null && KeyValue.COMPARATOR.compare(currentKV, previousKV) < 0) { + if (previousKV != null && CellComparator.COMPARATOR.compare(currentKV, previousKV) < 0) { dumpInputKVSet(); fail("Current kv " + currentKV + " is smaller than previous keyvalue " + previousKV); } @@ -195,7 
+196,7 @@ public class TestPrefixTreeEncoding { HFileBlockEncodingContext blkEncodingCtx = new HFileBlockDefaultEncodingContext( DataBlockEncoding.PREFIX_TREE, new byte[0], meta); generateRandomTestData(kvset, batchId, includesTag, encoder, blkEncodingCtx, userDataStream); - EncodedSeeker seeker = encoder.createSeeker(KeyValue.COMPARATOR, + EncodedSeeker seeker = encoder.createSeeker(CellComparator.COMPARATOR, encoder.newDataBlockDecodingContext(meta)); byte[] onDiskBytes = baosInMemory.toByteArray(); ByteBuffer readBuffer = ByteBuffer.wrap(onDiskBytes, DataBlockEncoding.ID_SIZE, @@ -218,7 +219,7 @@ public class TestPrefixTreeEncoding { ByteArrayOutputStream baosInMemory = new ByteArrayOutputStream(); DataOutputStream userDataStream = new DataOutputStream(baosInMemory); generateFixedTestData(kvset, batchId, includesTag, encoder, blkEncodingCtx, userDataStream); - EncodedSeeker seeker = encoder.createSeeker(KeyValue.COMPARATOR, + EncodedSeeker seeker = encoder.createSeeker(CellComparator.COMPARATOR, encoder.newDataBlockDecodingContext(meta)); byte[] onDiskBytes = baosInMemory.toByteArray(); ByteBuffer readBuffer = ByteBuffer.wrap(onDiskBytes, DataBlockEncoding.ID_SIZE, @@ -245,7 +246,7 @@ public class TestPrefixTreeEncoding { fail("Get error result after seeking " + firstOnRow); } if (hasMoreOfEncodeScanner) { - if (KeyValue.COMPARATOR.compare(encodeSeeker.getKeyValue(), + if (CellComparator.COMPARATOR.compare(encodeSeeker.getKeyValue(), collectionScanner.peek()) != 0) { dumpInputKVSet(); fail("Expected " + collectionScanner.peek() + " actual " diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestSeekToBlockWithEncoders.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestSeekToBlockWithEncoders.java index c053449..02f0058 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestSeekToBlockWithEncoders.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestSeekToBlockWithEncoders.java @@ -24,6 +24,7 @@ import java.util.ArrayList; import java.util.List; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; @@ -266,7 +267,7 @@ public class TestSeekToBlockWithEncoders { HConstants.HFILEBLOCK_DUMMY_HEADER, meta); ByteBuffer encodedBuffer = TestDataBlockEncoders.encodeKeyValues(encoding, kvs, encodingContext); - DataBlockEncoder.EncodedSeeker seeker = encoder.createSeeker(KeyValue.COMPARATOR, + DataBlockEncoder.EncodedSeeker seeker = encoder.createSeeker(CellComparator.COMPARATOR, encoder.newDataBlockDecodingContext(meta)); seeker.setCurrentBuffer(encodedBuffer); encodedSeekers.add(seeker); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java index 0622f55..c410c13 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java @@ -36,6 +36,7 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; @@ -379,7 
+380,7 @@ public class TestCacheOnWrite { .withBlockSize(DATA_BLOCK_SIZE).withDataBlockEncoding(encoder.getDataBlockEncoding()) .withIncludesTags(useTags).build(); StoreFile.Writer sfw = new StoreFile.WriterBuilder(conf, cacheConf, fs) - .withOutputDir(storeFileParentDir).withComparator(KeyValue.COMPARATOR) + .withOutputDir(storeFileParentDir).withComparator(CellComparator.COMPARATOR) .withFileContext(meta) .withBloomType(BLOOM_TYPE).withMaxKeyCount(NUM_KV).build(); byte[] cf = Bytes.toBytes("fam"); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestFixedFileTrailer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestFixedFileTrailer.java index 97784cb..95063ce 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestFixedFileTrailer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestFixedFileTrailer.java @@ -18,6 +18,10 @@ */ package org.apache.hadoop.hbase.io.hfile; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; import java.io.DataInputStream; @@ -27,6 +31,12 @@ import java.util.ArrayList; import java.util.Collection; import java.util.List; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.*; import org.apache.hadoop.hbase.testclassification.IOTests; import org.apache.hadoop.hbase.testclassification.SmallTests; @@ -38,15 +48,6 @@ import org.junit.runner.RunWith; import org.junit.runners.Parameterized; import org.junit.runners.Parameterized.Parameters; -import static org.junit.Assert.*; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.fs.FSDataInputStream; -import org.apache.hadoop.fs.FSDataOutputStream; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; - @RunWith(Parameterized.class) @Category({IOTests.class, SmallTests.class}) public class TestFixedFileTrailer { @@ -96,7 +97,7 @@ public class TestFixedFileTrailer { t.setLastDataBlockOffset(291); t.setNumDataIndexLevels(3); - t.setComparatorClass(KeyValue.COMPARATOR.getClass()); + t.setComparatorClass(CellComparator.COMPARATOR.getClass()); t.setFirstDataBlockOffset(9081723123L); // Completely unrealistic. t.setUncompressedDataIndexSize(827398717L); // Something random. @@ -177,7 +178,7 @@ public class TestFixedFileTrailer { t.setEntryCount(((long) Integer.MAX_VALUE) + 1); t.setLastDataBlockOffset(291); t.setNumDataIndexLevels(3); - t.setComparatorClass(KeyValue.COMPARATOR.getClass()); + t.setComparatorClass(CellComparator.COMPARATOR.getClass()); t.setFirstDataBlockOffset(9081723123L); // Completely unrealistic. t.setUncompressedDataIndexSize(827398717L); // Something random. 
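As the TestCacheOnWrite hunk above shows, StoreFile.WriterBuilder now accepts the Cell-based comparator, and the builder falls back to CellComparator.COMPARATOR when none is supplied. A hedged sketch of the builder call shape against a local filesystem (the output directory is hypothetical, and the context values are arbitrary):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFileContext;
import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.regionserver.StoreFile;

public class WriterBuilderSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.getLocal(conf);
    HFileContext context = new HFileContextBuilder().withBlockSize(64 * 1024).build();

    // withComparator now takes a CellComparator; omitting it defaults to
    // CellComparator.COMPARATOR rather than KeyValue.COMPARATOR.
    StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, new CacheConfig(conf), fs)
        .withOutputDir(new Path("/tmp/storefile-sketch")) // hypothetical path
        .withComparator(CellComparator.COMPARATOR)
        .withBloomType(BloomType.ROW)
        .withFileContext(context)
        .build();
    writer.close();
  }
}
```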
t.setLoadOnOpenOffset(128); @@ -275,7 +276,7 @@ public class TestFixedFileTrailer { assertEquals(expected.getFirstDataBlockOffset(), loaded.getFirstDataBlockOffset()); assertTrue( - expected.createComparator() instanceof KeyValue.KVComparator); + expected.createComparator() instanceof CellComparator); assertEquals(expected.getUncompressedDataIndexSize(), loaded.getUncompressedDataIndexSize()); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java index 9e4b1c7..36f658e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java @@ -32,11 +32,13 @@ import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.HBaseTestCase; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValue.Type; +import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.Tag; import org.apache.hadoop.hbase.io.compress.Compression; import org.apache.hadoop.hbase.io.hfile.HFile.Reader; @@ -251,7 +253,7 @@ public class TestHFile extends HBaseTestCase { Writer writer = HFile.getWriterFactory(conf, cacheConf) .withOutputStream(fout) .withFileContext(meta) - .withComparator(new KeyValue.KVComparator()) + .withComparator(CellComparator.COMPARATOR) .create(); LOG.info(writer); writeRecords(writer, useTags); @@ -267,18 +269,18 @@ public class TestHFile extends HBaseTestCase { // Align scanner at start of the file. 
scanner.seekTo(); readAllRecords(scanner); - int seekTo = scanner.seekTo(KeyValue.createKeyValueFromKey(getSomeKey(50))); + int seekTo = scanner.seekTo(KeyValueUtil.createKeyValueFromKey(getSomeKey(50))); System.out.println(seekTo); assertTrue("location lookup failed", - scanner.seekTo(KeyValue.createKeyValueFromKey(getSomeKey(50))) == 0); + scanner.seekTo(KeyValueUtil.createKeyValueFromKey(getSomeKey(50))) == 0); // read the key and see if it matches ByteBuffer readKey = scanner.getKey(); assertTrue("seeked key does not match", Arrays.equals(getSomeKey(50), Bytes.toBytes(readKey))); - scanner.seekTo(KeyValue.createKeyValueFromKey(getSomeKey(0))); + scanner.seekTo(KeyValueUtil.createKeyValueFromKey(getSomeKey(0))); ByteBuffer val1 = scanner.getValue(); - scanner.seekTo(KeyValue.createKeyValueFromKey(getSomeKey(0))); + scanner.seekTo(KeyValueUtil.createKeyValueFromKey(getSomeKey(0))); ByteBuffer val2 = scanner.getValue(); assertTrue(Arrays.equals(Bytes.toBytes(val1), Bytes.toBytes(val2))); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.java index eb1f1bb..543e9de 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.java @@ -47,6 +47,7 @@ import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; @@ -170,7 +171,7 @@ public class TestHFileBlock { // sort it and write to stream int totalSize = 0; - Collections.sort(keyValues, KeyValue.COMPARATOR); + Collections.sort(keyValues, CellComparator.COMPARATOR); for (KeyValue kv : keyValues) { totalSize += kv.getLength(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java index 0ee9d14..d88282a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java @@ -42,9 +42,11 @@ import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.fs.HFileSystem; import org.apache.hadoop.hbase.io.compress.Compression; import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; @@ -204,7 +206,7 @@ public class TestHFileBlockIndex { BlockReaderWrapper brw = new BlockReaderWrapper(blockReader); HFileBlockIndex.BlockIndexReader indexReader = new HFileBlockIndex.BlockIndexReader( - KeyValue.RAW_COMPARATOR, numLevels, brw); + CellComparator.RAW_COMPARATOR, numLevels, brw); indexReader.readRootIndex(blockReader.blockRange(rootIndexOffset, fileSize).nextBlockWithBlockType(BlockType.ROOT_INDEX), numRootEntries); @@ -217,10 +219,12 @@ public class TestHFileBlockIndex { for (byte[] key : keys) { assertTrue(key != null); 
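The seekTo calls in these tests now route flat key bytes through KeyValueUtil.createKeyValueFromKey, since the scanner API in this patch takes a Cell rather than raw key bytes. A minimal sketch of the wrapping step (the key contents are invented):

```java
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValueUtil;
import org.apache.hadoop.hbase.util.Bytes;

public class FlatKeyWrapSketch {
  public static void main(String[] args) {
    // A "flat key" is the serialized key portion of a KeyValue
    // (row/family/qualifier/timestamp/type), as stored in HFile indexes.
    byte[] flatKey = new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"),
        Bytes.toBytes("q"), 1L, Bytes.toBytes("v")).getKey();

    // The factory moved from KeyValue to KeyValueUtil; the result is a
    // key-only KeyValue usable with Cell-taking APIs such as seekTo(Cell).
    KeyValue keyOnly = KeyValueUtil.createKeyValueFromKey(flatKey);
    System.out.println(keyOnly);
  }
}
```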
assertTrue(indexReader != null); + KeyValue.KeyOnlyKeyValue keyOnlyKey = new KeyValue.KeyOnlyKeyValue(key, 0, key.length); HFileBlock b = - indexReader.seekToDataBlock(new KeyValue.KeyOnlyKeyValue(key, 0, key.length), null, true, + indexReader.seekToDataBlock(keyOnlyKey, null, true, true, false, null); - if (KeyValue.COMPARATOR.compareFlatKey(key, firstKeyInFile) < 0) { + if (CellComparator.COMPARATOR.compare(keyOnlyKey, firstKeyInFile, + 0, firstKeyInFile.length) < 0) { assertTrue(b == null); ++i; continue; @@ -364,7 +368,7 @@ public class TestHFileBlockIndex { // Make sure the keys are increasing. for (int i = 0; i < keys.size() - 1; ++i) - assertTrue(KeyValue.COMPARATOR.compare( + assertTrue(CellComparator.COMPARATOR.compare( new KeyValue.KeyOnlyKeyValue(keys.get(i), 0, keys.get(i).length), new KeyValue.KeyOnlyKeyValue(keys.get(i + 1), 0, keys.get(i + 1).length)) < 0); @@ -403,7 +407,7 @@ public class TestHFileBlockIndex { KeyValue.KeyOnlyKeyValue cell = new KeyValue.KeyOnlyKeyValue( arrayHoldingKey, searchKey.length / 2, searchKey.length); int searchResult = BlockIndexReader.binarySearchNonRootIndex(cell, - nonRootIndex, KeyValue.COMPARATOR); + nonRootIndex, CellComparator.COMPARATOR); String lookupFailureMsg = "Failed to look up key #" + i + " (" + Bytes.toStringBinary(searchKey) + ")"; @@ -429,7 +433,7 @@ public class TestHFileBlockIndex { // higher-level API functions. boolean locateBlockResult = (BlockIndexReader.locateNonRootIndexEntry(nonRootIndex, cell, - KeyValue.COMPARATOR) != -1); + CellComparator.COMPARATOR) != -1); if (i == 0) { assertFalse(locateBlockResult); @@ -489,7 +493,7 @@ public class TestHFileBlockIndex { long expected = ClassSize.estimateBase(cl, false); HFileBlockIndex.BlockIndexReader bi = - new HFileBlockIndex.BlockIndexReader(KeyValue.RAW_COMPARATOR, 1); + new HFileBlockIndex.BlockIndexReader(CellComparator.RAW_COMPARATOR, 1); long actual = bi.heapSize(); // Since the arrays in BlockIndex(byte [][] blockKeys, long [] blockOffsets, @@ -557,8 +561,8 @@ public class TestHFileBlockIndex { values[i] = CellUtil.cloneValue(kv); keyStrSet.add(Bytes.toStringBinary(k)); if (i > 0) { - assertTrue(KeyValue.COMPARATOR.compareFlatKey(keys[i - 1], - keys[i]) < 0); + assertTrue((-CellComparator.COMPARATOR.compare(kv, keys[i - 1], + 0, keys[i - 1].length)) < 0); } } @@ -640,7 +644,7 @@ public class TestHFileBlockIndex { private void checkSeekTo(byte[][] keys, HFileScanner scanner, int i) throws IOException { assertEquals("Failed to seek to key #" + i + " (" + Bytes.toStringBinary(keys[i]) + ")", 0, - scanner.seekTo(KeyValue.createKeyValueFromKey(keys[i]))); + scanner.seekTo(KeyValueUtil.createKeyValueFromKey(keys[i]))); } private void assertArrayEqualsBuffer(String msgPrefix, byte[] arr, diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileSeek.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileSeek.java index 26adb49..84ba52e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileSeek.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileSeek.java @@ -42,6 +42,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.RawLocalFileSystem; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.io.hfile.HFile.Reader; import org.apache.hadoop.hbase.io.hfile.HFile.Writer; import org.apache.hadoop.hbase.testclassification.IOTests; @@ -195,7 +196,7 @@
public class TestHFileSeek extends TestCase { kSampler.next(key); byte [] k = new byte [key.getLength()]; System.arraycopy(key.getBytes(), 0, k, 0, key.getLength()); - if (scanner.seekTo(KeyValue.createKeyValueFromKey(k)) >= 0) { + if (scanner.seekTo(KeyValueUtil.createKeyValueFromKey(k)) >= 0) { ByteBuffer bbkey = scanner.getKey(); ByteBuffer bbval = scanner.getValue(); totalBytes += bbkey.limit(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV2.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV2.java index ca063bc..fbd513e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV2.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV2.java @@ -38,9 +38,9 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.KeyValue.KVComparator; import org.apache.hadoop.hbase.io.compress.Compression; import org.apache.hadoop.hbase.io.compress.Compression.Algorithm; import org.apache.hadoop.hbase.io.hfile.HFile.FileInfo; @@ -146,13 +146,13 @@ public class TestHFileWriterV2 { HFileBlock.FSReader blockReader = new HFileBlock.FSReaderImpl(fsdis, fileSize, meta); // Comparator class name is stored in the trailer in version 2. - KVComparator comparator = trailer.createComparator(); + CellComparator comparator = trailer.createComparator(); HFileBlockIndex.BlockIndexReader dataBlockIndexReader = new HFileBlockIndex.BlockIndexReader(comparator, trailer.getNumDataIndexLevels()); HFileBlockIndex.BlockIndexReader metaBlockIndexReader = new HFileBlockIndex.BlockIndexReader( - KeyValue.RAW_COMPARATOR, 1); + CellComparator.RAW_COMPARATOR, 1); HFileBlock.BlockIterator blockIter = blockReader.blockRange( trailer.getLoadOnOpenDataOffset(), diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV3.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV3.java index 2ca9273..2cb75e8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV3.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV3.java @@ -38,10 +38,10 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.KeyValue.KVComparator; import org.apache.hadoop.hbase.Tag; import org.apache.hadoop.hbase.io.compress.Compression; import org.apache.hadoop.hbase.io.compress.Compression.Algorithm; @@ -122,7 +122,7 @@ public class TestHFileWriterV3 { HFile.Writer writer = new HFileWriterFactory(conf, new CacheConfig(conf)) .withPath(fs, hfilePath) .withFileContext(context) - .withComparator(KeyValue.COMPARATOR) + .withComparator(CellComparator.COMPARATOR) .create(); Random rand = new Random(9713312); // Just a fixed seed. 
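Aside for readers of this patch: nearly every hunk above and below applies the same mechanical substitution, so a single self-contained sketch may be easier to study than any one test. The sketch is illustrative only, not code from the patch: the class name ComparatorMigrationSketch and the sample rows are invented, and it relies only on the CellComparator.COMPARATOR singleton, the compare(Cell, Cell) and compare(Cell, byte[], int, int) overloads, the KeyValue.KeyOnlyKeyValue wrapper, and the KeyValueUtil.createKeyValueFromKey factory that these hunks already use.

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValueUtil;
import org.apache.hadoop.hbase.util.Bytes;

public class ComparatorMigrationSketch {
  public static void main(String[] args) {
    List<KeyValue> kvs = new ArrayList<KeyValue>();
    kvs.add(new KeyValue(Bytes.toBytes("row2"), Bytes.toBytes("f"),
        Bytes.toBytes("q"), 2L, Bytes.toBytes("v2")));
    kvs.add(new KeyValue(Bytes.toBytes("row1"), Bytes.toBytes("f"),
        Bytes.toBytes("q"), 1L, Bytes.toBytes("v1")));

    // Before: Collections.sort(kvs, KeyValue.COMPARATOR).
    // After: one Cell-based comparator orders any Cell implementation.
    Collections.sort(kvs, CellComparator.COMPARATOR);

    // Flat-key comparisons (the old compareFlatKey) now wrap the serialized
    // key in a KeyOnlyKeyValue so the same Cell comparator applies.
    byte[] flatKey = kvs.get(0).getKey();
    KeyValue.KeyOnlyKeyValue keyOnly =
        new KeyValue.KeyOnlyKeyValue(flatKey, 0, flatKey.length);
    System.out.println(
        CellComparator.COMPARATOR.compare(keyOnly, kvs.get(1)) < 0); // true

    // The key-only factory moved too: was KeyValue.createKeyValueFromKey.
    KeyValue fromKey = KeyValueUtil.createKeyValueFromKey(flatKey);
    System.out.println(fromKey.getKeyLength() == flatKey.length); // true
  }
}

The hunk that follows shows the read side of the same change: trailer.createComparator() now yields a CellComparator, so the block-index readers are wired to the Cell interface end to end.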
@@ -176,13 +176,13 @@ public class TestHFileWriterV3 { HFileBlock.FSReader blockReader = new HFileBlock.FSReaderImpl(fsdis, fileSize, meta); // Comparator class name is stored in the trailer in version 2. - KVComparator comparator = trailer.createComparator(); + CellComparator comparator = trailer.createComparator(); HFileBlockIndex.BlockIndexReader dataBlockIndexReader = new HFileBlockIndex.BlockIndexReader(comparator, trailer.getNumDataIndexLevels()); HFileBlockIndex.BlockIndexReader metaBlockIndexReader = new HFileBlockIndex.BlockIndexReader( - KeyValue.RAW_COMPARATOR, 1); + CellComparator.RAW_COMPARATOR, 1); HFileBlock.BlockIterator blockIter = blockReader.blockRange( trailer.getLoadOnOpenDataOffset(), diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetch.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetch.java index 6a12616..dbc5dd0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetch.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetch.java @@ -27,6 +27,7 @@ import java.util.Random; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.fs.HFileSystem; @@ -106,7 +107,7 @@ public class TestPrefetch { .build(); StoreFile.Writer sfw = new StoreFile.WriterBuilder(conf, cacheConf, fs) .withOutputDir(storeFileParentDir) - .withComparator(KeyValue.COMPARATOR) + .withComparator(CellComparator.COMPARATOR) .withFileContext(meta) .build(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestReseekTo.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestReseekTo.java index 9d7de02..a17368c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestReseekTo.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestReseekTo.java @@ -26,6 +26,7 @@ import java.util.List; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; @@ -64,7 +65,7 @@ public class TestReseekTo { .withOutputStream(fout) .withFileContext(context) // NOTE: This test is dependent on this deprecated nonstandard comparator - .withComparator(KeyValue.COMPARATOR) + .withComparator(CellComparator.COMPARATOR) .create(); int numberOfKeys = 1000; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekTo.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekTo.java index 69bc09d..ca3afaf 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekTo.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekTo.java @@ -23,6 +23,7 @@ import java.io.IOException; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.HBaseTestCase; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; @@ -79,7 +80,7 @@ public class TestSeekTo extends HBaseTestCase { .withIncludesTags(true).build(); HFile.Writer writer = 
HFile.getWriterFactoryNoCache(conf).withOutputStream(fout) .withFileContext(context) - .withComparator(KeyValue.COMPARATOR).create(); + .withComparator(CellComparator.COMPARATOR).create(); // 4 bytes * 3 * 2 for each key/value + // 3 for keys, 15 for values = 42 (woot) writer.append(toKV("c", tagUsage)); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/KeyValueScanFixture.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/KeyValueScanFixture.java index 53c55be..3f87a00 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/KeyValueScanFixture.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/KeyValueScanFixture.java @@ -19,13 +19,13 @@ package org.apache.hadoop.hbase.regionserver; -import org.apache.hadoop.hbase.regionserver.KeyValueScanner; -import org.apache.hadoop.hbase.util.CollectionBackedScanner; -import org.apache.hadoop.hbase.KeyValue; - import java.util.ArrayList; import java.util.List; +import org.apache.hadoop.hbase.CellComparator; +import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.util.CollectionBackedScanner; + /** * A fixture that implements and presents a KeyValueScanner. * It takes a list of key/values which is then sorted according @@ -33,7 +33,7 @@ import java.util.List; * to be a store file scanner. */ public class KeyValueScanFixture extends CollectionBackedScanner { - public KeyValueScanFixture(KeyValue.KVComparator comparator, + public KeyValueScanFixture(CellComparator comparator, KeyValue... incData) { super(comparator, incData); } @@ -41,7 +41,7 @@ public class KeyValueScanFixture extends CollectionBackedScanner { public static List scanFixture(KeyValue[] ... kvArrays) { ArrayList scanners = new ArrayList(); for (KeyValue [] kvs : kvArrays) { - scanners.add(new KeyValueScanFixture(KeyValue.COMPARATOR, kvs)); + scanners.add(new KeyValueScanFixture(CellComparator.COMPARATOR, kvs)); } return scanners; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MockStoreFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MockStoreFile.java index 3a12674..f99226f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MockStoreFile.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MockStoreFile.java @@ -23,8 +23,8 @@ import java.util.Map; import java.util.TreeMap; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.HBaseTestingUtility; -import org.apache.hadoop.hbase.KeyValue.KVComparator; import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.util.Bytes; @@ -54,7 +54,7 @@ public class MockStoreFile extends StoreFile { } @Override - byte[] getFileSplitPoint(KVComparator comparator) throws IOException { + byte[] getFileSplitPoint(CellComparator comparator) throws IOException { return this.splitPoint; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCellSkipListSet.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCellSkipListSet.java index 9b9db5a..c4e0a42 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCellSkipListSet.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCellSkipListSet.java @@ -24,6 +24,7 @@ import java.util.SortedSet; import junit.framework.TestCase; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellComparator; import 
org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.testclassification.SmallTests; @@ -33,7 +34,7 @@ import org.junit.experimental.categories.Category; @Category({RegionServerTests.class, SmallTests.class}) public class TestCellSkipListSet extends TestCase { private final CellSkipListSet csls = - new CellSkipListSet(KeyValue.COMPARATOR); + new CellSkipListSet(CellComparator.COMPARATOR); protected void setUp() throws Exception { super.setUp(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompoundBloomFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompoundBloomFilter.java index d7b4a04..9bd9099 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompoundBloomFilter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompoundBloomFilter.java @@ -36,8 +36,11 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.CellComparator; +import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.client.Scan; @@ -141,7 +144,7 @@ public class TestCompoundBloomFilter { List kvList = new ArrayList(n); for (int i = 0; i < n; ++i) kvList.add(TestHFileWriterV2.randomKeyValue(rand)); - Collections.sort(kvList, KeyValue.COMPARATOR); + Collections.sort(kvList, CellComparator.COMPARATOR); return kvList; } @@ -315,8 +318,8 @@ public class TestCompoundBloomFilter { // Validate the key count in the Bloom filter. boolean newKey = true; if (prev != null) { - newKey = !(bt == BloomType.ROW ? KeyValue.COMPARATOR.matchingRows(kv, - prev) : KeyValue.COMPARATOR.matchingRowColumn(kv, prev)); + newKey = !(bt == BloomType.ROW ? 
CellUtil.matchingRows(kv, + prev) : CellUtil.matchingRowColumn(kv, prev)); } if (newKey) ++keyCount; @@ -354,8 +357,8 @@ public class TestCompoundBloomFilter { row, 0, 0); byte[] rowColKey = cbfb.createBloomKey(row, 0, row.length, qualifier, 0, qualifier.length); - KeyValue rowKV = KeyValue.createKeyValueFromKey(rowKey); - KeyValue rowColKV = KeyValue.createKeyValueFromKey(rowColKey); + KeyValue rowKV = KeyValueUtil.createKeyValueFromKey(rowKey); + KeyValue rowColKV = KeyValueUtil.createKeyValueFromKey(rowColKey); assertEquals(rowKV.getTimestamp(), rowColKV.getTimestamp()); assertEquals(Bytes.toStringBinary(rowKV.getRow()), Bytes.toStringBinary(rowColKV.getRow())); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java index 622c145..7f5b1d3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java @@ -34,6 +34,7 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseTestingUtility; @@ -474,7 +475,7 @@ public class TestDefaultMemStore extends TestCase { } public void testMultipleVersionsSimple() throws Exception { - DefaultMemStore m = new DefaultMemStore(new Configuration(), KeyValue.COMPARATOR); + DefaultMemStore m = new DefaultMemStore(new Configuration(), CellComparator.COMPARATOR); byte [] row = Bytes.toBytes("testRow"); byte [] family = Bytes.toBytes("testFamily"); byte [] qf = Bytes.toBytes("testQualifier"); @@ -506,7 +507,7 @@ public class TestDefaultMemStore extends TestCase { Thread.sleep(1); addRows(this.memstore); Cell closestToEmpty = this.memstore.getNextRow(KeyValue.LOWESTKEY); - assertTrue(KeyValue.COMPARATOR.compareRows(closestToEmpty, + assertTrue(CellComparator.COMPARATOR.compareRows(closestToEmpty, new KeyValue(Bytes.toBytes(0), System.currentTimeMillis())) == 0); for (int i = 0; i < ROW_COUNT; i++) { Cell nr = this.memstore.getNextRow(new KeyValue(Bytes.toBytes(i), @@ -514,7 +515,7 @@ public class TestDefaultMemStore extends TestCase { if (i + 1 == ROW_COUNT) { assertEquals(nr, null); } else { - assertTrue(KeyValue.COMPARATOR.compareRows(nr, + assertTrue(CellComparator.COMPARATOR.compareRows(nr, new KeyValue(Bytes.toBytes(i + 1), System.currentTimeMillis())) == 0); } } @@ -533,8 +534,7 @@ public class TestDefaultMemStore extends TestCase { byte[] row1 = Bytes.toBytes(rowId); assertTrue( "Row name", - KeyValue.COMPARATOR.compareRows(left.getRowArray(), left.getRowOffset(), - (int) left.getRowLength(), row1, 0, row1.length) == 0); + CellComparator.COMPARATOR.compareRows(left, row1, 0, row1.length) == 0); assertEquals("Count of columns", QUALIFIER_COUNT, results.size()); List row = new ArrayList(); for (Cell kv : results) { @@ -786,7 +786,7 @@ public class TestDefaultMemStore extends TestCase { public void testUpsertMSLAB() throws Exception { Configuration conf = HBaseConfiguration.create(); conf.setBoolean(DefaultMemStore.USEMSLAB_KEY, true); - memstore = new DefaultMemStore(conf, KeyValue.COMPARATOR); + memstore = new DefaultMemStore(conf, CellComparator.COMPARATOR); int ROW_SIZE = 2048; byte[] qualifier = new 
byte[ROW_SIZE - 4]; @@ -827,7 +827,7 @@ public class TestDefaultMemStore extends TestCase { */ public void testUpsertMemstoreSize() throws Exception { Configuration conf = HBaseConfiguration.create(); - memstore = new DefaultMemStore(conf, KeyValue.COMPARATOR); + memstore = new DefaultMemStore(conf, CellComparator.COMPARATOR); long oldSize = memstore.size.get(); List l = new ArrayList(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultStoreEngine.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultStoreEngine.java index c185075..2ce77f2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultStoreEngine.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultStoreEngine.java @@ -20,8 +20,8 @@ package org.apache.hadoop.hbase.regionserver; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.HBaseConfiguration; -import org.apache.hadoop.hbase.KeyValue.KVComparator; import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.regionserver.compactions.RatioBasedCompactionPolicy; @@ -60,7 +60,7 @@ public class TestDefaultStoreEngine { conf.set(DefaultStoreEngine.DEFAULT_STORE_FLUSHER_CLASS_KEY, DummyStoreFlusher.class.getName()); Store mockStore = Mockito.mock(Store.class); - StoreEngine se = StoreEngine.create(mockStore, conf, new KVComparator()); + StoreEngine se = StoreEngine.create(mockStore, conf, new CellComparator()); Assert.assertTrue(se instanceof DefaultStoreEngine); Assert.assertTrue(se.getCompactionPolicy() instanceof DummyCompactionPolicy); Assert.assertTrue(se.getStoreFlusher() instanceof DummyStoreFlusher); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java index 110cd36..418e2fc 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java @@ -171,9 +171,9 @@ public class TestGetClosestAtOrBefore extends HBaseTestCase { } private byte [] extractRowFromMetaRow(final byte [] b) { - int firstDelimiter = KeyValue.getDelimiter(b, 0, b.length, + int firstDelimiter = Bytes.searchDelimiterIndex(b, 0, b.length, HConstants.DELIMITER); - int lastDelimiter = KeyValue.getDelimiterInReverse(b, 0, b.length, + int lastDelimiter = Bytes.searchDelimiterIndexInReverse(b, 0, b.length, HConstants.DELIMITER); int length = lastDelimiter - firstDelimiter - 1; byte [] row = new byte[length]; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java index 6abe076..5ba2596 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java @@ -2700,7 +2700,7 @@ public class TestHRegion { res = new ArrayList(); is.next(res); for (int i = 0; i < res.size(); i++) { - assertTrue(CellComparator.equalsIgnoreMvccVersion(expected1.get(i), res.get(i))); + assertTrue(CellUtil.equalsIgnoreMvccVersion(expected1.get(i), res.get(i))); } // Result 2 @@ -2711,7 +2711,7 @@ public class TestHRegion { res = 
new ArrayList(); is.next(res); for (int i = 0; i < res.size(); i++) { - assertTrue(CellComparator.equalsIgnoreMvccVersion(expected2.get(i), res.get(i))); + assertTrue(CellUtil.equalsIgnoreMvccVersion(expected2.get(i), res.get(i))); } } finally { HBaseTestingUtility.closeRegionAndWAL(this.region); @@ -2833,7 +2833,7 @@ public class TestHRegion { // Verify result for (int i = 0; i < expected.size(); i++) { - assertTrue(CellComparator.equalsIgnoreMvccVersion(expected.get(i), actual.get(i))); + assertTrue(CellUtil.equalsIgnoreMvccVersion(expected.get(i), actual.get(i))); } } finally { HBaseTestingUtility.closeRegionAndWAL(this.region); @@ -2915,7 +2915,7 @@ public class TestHRegion { // Verify result for (int i = 0; i < expected.size(); i++) { - assertTrue(CellComparator.equalsIgnoreMvccVersion(expected.get(i), actual.get(i))); + assertTrue(CellUtil.equalsIgnoreMvccVersion(expected.get(i), actual.get(i))); } } finally { HBaseTestingUtility.closeRegionAndWAL(this.region); @@ -3037,7 +3037,7 @@ public class TestHRegion { // Verify result for (int i = 0; i < expected.size(); i++) { - assertTrue(CellComparator.equalsIgnoreMvccVersion(expected.get(i), actual.get(i))); + assertTrue(CellUtil.equalsIgnoreMvccVersion(expected.get(i), actual.get(i))); } } finally { HBaseTestingUtility.closeRegionAndWAL(this.region); @@ -3164,7 +3164,7 @@ public class TestHRegion { // Verify result for (int i = 0; i < expected.size(); i++) { - assertTrue(CellComparator.equalsIgnoreMvccVersion(expected.get(i), actual.get(i))); + assertTrue(CellUtil.equalsIgnoreMvccVersion(expected.get(i), actual.get(i))); } } finally { HBaseTestingUtility.closeRegionAndWAL(this.region); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestKeyValueHeap.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestKeyValueHeap.java index 86a15ff..0fa904c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestKeyValueHeap.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestKeyValueHeap.java @@ -25,6 +25,7 @@ import java.util.Iterator; import java.util.List; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.HBaseTestCase; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.testclassification.RegionServerTests; @@ -108,7 +109,7 @@ public class TestKeyValueHeap extends HBaseTestCase { //Creating KeyValueHeap KeyValueHeap kvh = - new KeyValueHeap(scanners, KeyValue.COMPARATOR); + new KeyValueHeap(scanners, CellComparator.COMPARATOR); List actual = new ArrayList(); while(kvh.peek() != null){ @@ -126,7 +127,7 @@ public class TestKeyValueHeap extends HBaseTestCase { //Check if result is sorted according to Comparator for(int i=0; i scanners = getScanners(memstore, sf1, sf2, startRow, true, readPoint); ReversedKeyValueHeap kvHeap = new ReversedKeyValueHeap(scanners, - KeyValue.COMPARATOR); + CellComparator.COMPARATOR); return kvHeap; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSeekOptimizations.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSeekOptimizations.java index 1e09c40..ea4db98 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSeekOptimizations.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSeekOptimizations.java @@ -35,6 +35,7 @@ import java.util.Set; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; 
import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HColumnDescriptor; @@ -303,7 +304,7 @@ public class TestSeekOptimizations { } } expectedKVs = filteredKVs; - Collections.sort(expectedKVs, KeyValue.COMPARATOR); + Collections.sort(expectedKVs, CellComparator.COMPARATOR); } public void put(String qual, long ts) { @@ -458,7 +459,7 @@ public class TestSeekOptimizations { int i; for (i = 0; i < minLen - && KeyValue.COMPARATOR.compareOnlyKeyPortion(expected.get(i), actual.get(i)) == 0; + && CellComparator.COMPARATOR.compareOnlyKeyPortion(expected.get(i), actual.get(i)) == 0; ++i) {} if (additionalMsg == null) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java index 59793e0..b9cfeca 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java @@ -19,7 +19,9 @@ package org.apache.hadoop.hbase.regionserver; -import static org.junit.Assert.*; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; import static org.mockito.Matchers.any; import static org.mockito.Mockito.spy; import static org.mockito.Mockito.times; @@ -48,6 +50,7 @@ import org.apache.hadoop.fs.LocalFileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseTestingUtility; @@ -55,7 +58,6 @@ import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.KeyValue.KVComparator; import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.RegionServerTests; @@ -503,7 +505,7 @@ public class TestStore { //this.store.get(get, qualifiers, result); //Need to sort the result since multiple files - Collections.sort(result, KeyValue.COMPARATOR); + Collections.sort(result, CellComparator.COMPARATOR); //Compare assertCheck(); @@ -538,7 +540,7 @@ public class TestStore { get.getRow(), qualifiers); //Need to sort the result since multiple files - Collections.sort(result, KeyValue.COMPARATOR); + Collections.sort(result, CellComparator.COMPARATOR); //Compare assertCheck(); @@ -986,7 +988,7 @@ public class TestStore { public static DefaultCompactor lastCreatedCompactor = null; @Override protected void createComponents( - Configuration conf, Store store, KVComparator comparator) throws IOException { + Configuration conf, Store store, CellComparator comparator) throws IOException { super.createComponents(conf, store, comparator); lastCreatedCompactor = this.compactor; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java index e5a5022..cf9244d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java +++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java @@ -170,9 +170,9 @@ public class TestStoreFile extends HBaseTestCase { // Split on a row, not in middle of row. Midkey returned by reader // may be in middle of row. Create new one with empty column and // timestamp. - KeyValue kv = KeyValue.createKeyValueFromKey(reader.midkey()); + KeyValue kv = KeyValueUtil.createKeyValueFromKey(reader.midkey()); byte [] midRow = kv.getRow(); - kv = KeyValue.createKeyValueFromKey(reader.getLastKey()); + kv = KeyValueUtil.createKeyValueFromKey(reader.getLastKey()); byte [] finalRow = kv.getRow(); // Make a reference HRegionInfo splitHri = new HRegionInfo(hri.getTable(), null, midRow); @@ -184,7 +184,7 @@ public class TestStoreFile extends HBaseTestCase { HFileScanner s = refHsf.createReader().getScanner(false, false); for(boolean first = true; (!s.isSeeked() && s.seekTo()) || s.next();) { ByteBuffer bb = s.getKey(); - kv = KeyValue.createKeyValueFromKey(bb); + kv = KeyValueUtil.createKeyValueFromKey(bb); if (first) { assertTrue(Bytes.equals(kv.getRow(), midRow)); first = false; @@ -315,7 +315,7 @@ public class TestStoreFile extends HBaseTestCase { private void checkHalfHFile(final HRegionFileSystem regionFs, final StoreFile f) throws IOException { byte [] midkey = f.createReader().midkey(); - KeyValue midKV = KeyValue.createKeyValueFromKey(midkey); + KeyValue midKV = KeyValueUtil.createKeyValueFromKey(midkey); byte [] midRow = midKV.getRow(); // Create top split. HRegionInfo topHri = new HRegionInfo(regionFs.getRegionInfo().getTable(), @@ -345,8 +345,8 @@ public class TestStoreFile extends HBaseTestCase { (topScanner.isSeeked() && topScanner.next())) { key = topScanner.getKey(); - if (topScanner.getReader().getComparator().compareFlatKey(key.array(), - key.arrayOffset(), key.limit(), midkey, 0, midkey.length) < 0) { + if ((-topScanner.getReader().getComparator().compare(midKV, key.array(), + key.arrayOffset(), key.limit())) < 0) { fail("key=" + Bytes.toStringBinary(key) + " < midkey=" + Bytes.toStringBinary(midkey)); } @@ -391,14 +391,16 @@ public class TestStoreFile extends HBaseTestCase { // Now read from the top. 
first = true; topScanner = top.getScanner(false, false); + KeyValue.KeyOnlyKeyValue keyOnlyKV = new KeyValue.KeyOnlyKeyValue(); while ((!topScanner.isSeeked() && topScanner.seekTo()) || topScanner.next()) { key = topScanner.getKey(); - assertTrue(topScanner.getReader().getComparator().compareFlatKey(key.array(), - key.arrayOffset(), key.limit(), badmidkey, 0, badmidkey.length) >= 0); + keyOnlyKV.setKey(key.array(), 0 + key.arrayOffset(), key.limit()); + assertTrue(topScanner.getReader().getComparator() + .compare(keyOnlyKV, badmidkey, 0, badmidkey.length) >= 0); if (first) { first = false; - KeyValue keyKV = KeyValue.createKeyValueFromKey(key); + KeyValue keyKV = KeyValueUtil.createKeyValueFromKey(key); LOG.info("First top when key < bottom: " + keyKV); String tmp = Bytes.toString(keyKV.getRow()); for (int i = 0; i < tmp.length(); i++) { @@ -406,7 +408,7 @@ public class TestStoreFile extends HBaseTestCase { } } } - KeyValue keyKV = KeyValue.createKeyValueFromKey(key); + KeyValue keyKV = KeyValueUtil.createKeyValueFromKey(key); LOG.info("Last top when key < bottom: " + keyKV); String tmp = Bytes.toString(keyKV.getRow()); for (int i = 0; i < tmp.length(); i++) { @@ -430,7 +432,7 @@ public class TestStoreFile extends HBaseTestCase { key = bottomScanner.getKey(); if (first) { first = false; - keyKV = KeyValue.createKeyValueFromKey(key); + keyKV = KeyValueUtil.createKeyValueFromKey(key); LOG.info("First bottom when key > top: " + keyKV); tmp = Bytes.toString(keyKV.getRow()); for (int i = 0; i < tmp.length(); i++) { @@ -438,7 +440,7 @@ public class TestStoreFile extends HBaseTestCase { } } } - keyKV = KeyValue.createKeyValueFromKey(key); + keyKV = KeyValueUtil.createKeyValueFromKey(key); LOG.info("Last bottom when key > top: " + keyKV); for (int i = 0; i < tmp.length(); i++) { assertTrue(Bytes.toString(keyKV.getRow()).charAt(i) == 'z'); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScanner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScanner.java index ee39212..bdd9feb 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScanner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScanner.java @@ -31,6 +31,7 @@ import java.util.TreeSet; import junit.framework.TestCase; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeepDeletedCells; import org.apache.hadoop.hbase.KeyValue; @@ -49,7 +50,7 @@ public class TestStoreScanner extends TestCase { private static final String CF_STR = "cf"; final byte [] CF = Bytes.toBytes(CF_STR); private ScanInfo scanInfo = new ScanInfo(CF, 0, Integer.MAX_VALUE, - Long.MAX_VALUE, KeepDeletedCells.FALSE, 0, KeyValue.COMPARATOR); + Long.MAX_VALUE, KeepDeletedCells.FALSE, 0, CellComparator.COMPARATOR); private ScanType scanType = ScanType.USER_SCAN; public void setUp() throws Exception { @@ -82,7 +83,7 @@ public class TestStoreScanner extends TestCase { }; List scanners = Arrays.asList( new KeyValueScanner[] { - new KeyValueScanFixture(KeyValue.COMPARATOR, kvs) + new KeyValueScanFixture(CellComparator.COMPARATOR, kvs) }); Scan scanSpec = new Scan(Bytes.toBytes(r1)); scanSpec.setTimeRange(0, 6); @@ -131,7 +132,7 @@ public class TestStoreScanner extends TestCase { }; List scanners = Arrays.asList( new KeyValueScanner[] { - new KeyValueScanFixture(KeyValue.COMPARATOR, kvs) + new KeyValueScanFixture(CellComparator.COMPARATOR, kvs) 
}); Scan scanSpec = new Scan(Bytes.toBytes("R1")); @@ -418,7 +419,7 @@ public class TestStoreScanner extends TestCase { Scan scan = new Scan(); scan.setMaxVersions(1); ScanInfo scanInfo = new ScanInfo(CF, 0, 1, 500, KeepDeletedCells.FALSE, 0, - KeyValue.COMPARATOR); + CellComparator.COMPARATOR); ScanType scanType = ScanType.USER_SCAN; StoreScanner scanner = new StoreScanner(scan, scanInfo, scanType, @@ -489,7 +490,7 @@ public class TestStoreScanner extends TestCase { scan.setMaxVersions(1); // scanner with ttl equal to 500 ScanInfo scanInfo = new ScanInfo(CF, 0, 1, 500, KeepDeletedCells.FALSE, 0, - KeyValue.COMPARATOR); + CellComparator.COMPARATOR); ScanType scanType = ScanType.USER_SCAN; StoreScanner scanner = new StoreScanner(scan, scanInfo, scanType, null, scanners); @@ -553,7 +554,7 @@ public class TestStoreScanner extends TestCase { 2 /* maxVersions */, 500 /* ttl */, KeepDeletedCells.FALSE /* keepDeletedCells */, 200, /* timeToPurgeDeletes */ - KeyValue.COMPARATOR); + CellComparator.COMPARATOR); StoreScanner scanner = new StoreScanner(scan, scanInfo, ScanType.COMPACT_DROP_DELETES, null, scanners, diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStripeCompactor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStripeCompactor.java index b743550..cc62dfa 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStripeCompactor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStripeCompactor.java @@ -42,11 +42,11 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.KeyValue.KVComparator; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.io.compress.Compression; import org.apache.hadoop.hbase.io.hfile.HFile; @@ -188,7 +188,7 @@ public class TestStripeCompactor { // Create store mock that is satisfactory for compactor. 
HColumnDescriptor col = new HColumnDescriptor(NAME_OF_THINGS); - ScanInfo si = new ScanInfo(col, Long.MAX_VALUE, 0, new KVComparator()); + ScanInfo si = new ScanInfo(col, Long.MAX_VALUE, 0, new CellComparator()); Store store = mock(Store.class); when(store.getFamily()).thenReturn(col); when(store.getScanInfo()).thenReturn(si); @@ -197,7 +197,7 @@ public class TestStripeCompactor { when(store.getRegionInfo()).thenReturn(new HRegionInfo(TABLE_NAME)); when(store.createWriterInTmp(anyLong(), any(Compression.Algorithm.class), anyBoolean(), anyBoolean(), anyBoolean())).thenAnswer(writers); - when(store.getComparator()).thenReturn(new KVComparator()); + when(store.getComparator()).thenReturn(new CellComparator()); return new StripeCompactor(conf, store) { @Override diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStripeStoreEngine.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStripeStoreEngine.java index 32ab164..d9e3ea3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStripeStoreEngine.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStripeStoreEngine.java @@ -33,8 +33,8 @@ import java.util.Arrays; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.HBaseConfiguration; -import org.apache.hadoop.hbase.KeyValue.KVComparator; import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext; import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest; import org.apache.hadoop.hbase.regionserver.compactions.NoLimitCompactionThroughputController; @@ -111,7 +111,7 @@ public class TestStripeStoreEngine { private static TestStoreEngine createEngine(Configuration conf) throws Exception { Store store = mock(Store.class); - KVComparator kvComparator = mock(KVComparator.class); + CellComparator kvComparator = mock(CellComparator.class); return (TestStoreEngine)StoreEngine.create(store, conf, kvComparator); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStripeStoreFileManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStripeStoreFileManager.java index 48f93e0..fa60ab1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStripeStoreFileManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStripeStoreFileManager.java @@ -36,11 +36,11 @@ import java.util.List; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.KeyValue.KVComparator; import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.Bytes; @@ -590,7 +590,7 @@ public class TestStripeStoreFileManager { ArrayList sfs, Configuration conf) throws Exception { StripeStoreConfig config = new StripeStoreConfig( conf, Mockito.mock(StoreConfigInformation.class)); - StripeStoreFileManager result = new StripeStoreFileManager(new KVComparator(), conf, config); + StripeStoreFileManager result = new StripeStoreFileManager(new CellComparator(), conf, config); 
result.loadFiles(sfs); return result; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestStripeCompactionPolicy.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestStripeCompactionPolicy.java index 81f8115..99cc047 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestStripeCompactionPolicy.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestStripeCompactionPolicy.java @@ -45,6 +45,7 @@ import java.util.List; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HRegionInfo; @@ -564,7 +565,7 @@ public class TestStripeCompactionPolicy { StoreFileWritersCapture writers = new StoreFileWritersCapture(); StripeStoreFlusher.StripeFlushRequest req = policy.selectFlush(si, input.length); StripeMultiFileWriter mw = req.createWriter(); - mw.init(null, writers, new KeyValue.KVComparator()); + mw.init(null, writers, CellComparator.COMPARATOR); for (KeyValue kv : input) { mw.append(kv); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWALEntryFilters.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWALEntryFilters.java index 3710fd6..ecfaa76 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWALEntryFilters.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWALEntryFilters.java @@ -24,6 +24,7 @@ import java.util.Map; import java.util.TreeMap; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; @@ -270,7 +271,7 @@ public class TestReplicationWALEntryFilters { List cells2 = e2.getEdit().getCells(); Assert.assertEquals(cells1.size(), cells2.size()); for (int i = 0; i < cells1.size(); i++) { - KeyValue.COMPARATOR.compare(cells1.get(i), cells2.get(i)); + CellComparator.COMPARATOR.compare(cells1.get(i), cells2.get(i)); } } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestByteBloomFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestByteBloomFilter.java index 21d7490..52d4eb5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestByteBloomFilter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestByteBloomFilter.java @@ -70,7 +70,7 @@ public class TestByteBloomFilter extends TestCase { assertTrue(newBf1.contains(key1, bb)); assertFalse(newBf1.contains(key2, bb)); assertTrue( newBf1.contains(bkey, bb) ); - assertTrue( newBf1.contains(bval, 1, bval.length-1, bb) ); + assertTrue( newBf1.contains(bval, 1, bval.length-1, bb, true) ); assertFalse( newBf1.contains(bval, bb) ); assertFalse( newBf1.contains(bval, bb) );
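A companion sketch for the equality helpers touched above: the static matchers that previously hung off the comparators (KeyValue.COMPARATOR.matchingRows / matchingRowColumn, CellComparator.equalsIgnoreMvccVersion) are now plain statics on CellUtil, as the TestCompoundBloomFilter and TestHRegion hunks show. The class name MatchingHelpersSketch and the sample cells are invented for illustration; only the CellUtil methods that appear in those hunks are assumed.

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.util.Bytes;

public class MatchingHelpersSketch {
  public static void main(String[] args) {
    Cell a = new KeyValue(Bytes.toBytes("r1"), Bytes.toBytes("f"),
        Bytes.toBytes("q1"), 1L, Bytes.toBytes("v"));
    Cell b = new KeyValue(Bytes.toBytes("r1"), Bytes.toBytes("f"),
        Bytes.toBytes("q2"), 2L, Bytes.toBytes("v"));

    // Was: KeyValue.COMPARATOR.matchingRows(a, b)
    System.out.println(CellUtil.matchingRows(a, b));       // true: same row

    // Was: KeyValue.COMPARATOR.matchingRowColumn(a, b)
    System.out.println(CellUtil.matchingRowColumn(a, b));  // false: qualifiers differ

    // Was: CellComparator.equalsIgnoreMvccVersion(a, b)
    System.out.println(CellUtil.equalsIgnoreMvccVersion(a, b)); // false: keys differ
  }
}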