diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java index fb4ec2c..deca7fb 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java @@ -1391,7 +1391,7 @@ public class KeyValue implements Cell, HeapSize, Cloneable, SettableSequenceId, /** * @return Family offset */ - private int getFamilyOffset(int rlength) { + public int getFamilyOffset(int rlength) { return this.offset + ROW_OFFSET + Bytes.SIZEOF_SHORT + rlength + Bytes.SIZEOF_BYTE; } @@ -1429,7 +1429,7 @@ public class KeyValue implements Cell, HeapSize, Cloneable, SettableSequenceId, /** * @return Qualifier offset */ - private int getQualifierOffset(int foffset) { + public int getQualifierOffset(int foffset) { return foffset + getFamilyLength(foffset); } @@ -1438,14 +1438,21 @@ public class KeyValue implements Cell, HeapSize, Cloneable, SettableSequenceId, */ @Override public int getQualifierLength() { - return getQualifierLength(getRowLength(),getFamilyLength()); + return getQualifierLength(getRowLength(), getFamilyLength()); } /** * @return Qualifier length */ - private int getQualifierLength(int rlength, int flength) { - return getKeyLength() - (int) getKeyDataStructureSize(rlength, flength, 0); + public int getQualifierLength(int rlength, int flength) { + return getQualifierLength(getKeyLength(), rlength, flength); + } + + /** + * @return Qualifier length + */ + public int getQualifierLength(int klength, int rlength, int flength) { + return klength - (int) getKeyDataStructureSize(rlength, flength, 0); } /** @@ -1554,7 +1561,7 @@ public class KeyValue implements Cell, HeapSize, Cloneable, SettableSequenceId, * @param keylength Pass if you have it to save on a int creation. 
* @return Timestamp */ - long getTimestamp(final int keylength) { + public long getTimestamp(final int keylength) { int tsOffset = getTimestampOffset(keylength); return Bytes.toLong(this.bytes, tsOffset); } @@ -1572,7 +1579,14 @@ public class KeyValue implements Cell, HeapSize, Cloneable, SettableSequenceId, */ @Override public byte getTypeByte() { - return this.bytes[this.offset + getKeyLength() - 1 + ROW_OFFSET]; + return getTypeByte(getKeyLength()); + } + + /** + * @return KeyValue.TYPE byte representation + */ + public byte getTypeByte(final int klength) { + return this.bytes[this.offset + klength - 1 + ROW_OFFSET]; } /** @@ -1629,7 +1643,13 @@ public class KeyValue implements Cell, HeapSize, Cloneable, SettableSequenceId, */ @Override public int getTagsLength() { - int tagsLen = this.length - (getKeyLength() + getValueLength() + KEYVALUE_INFRASTRUCTURE_SIZE); + return getTagsLength(getKeyLength()); + } + /** + * This returns the total length of the tag bytes + */ + public int getTagsLength(final int klength) { + int tagsLen = this.length - (klength + getValueLength() + KEYVALUE_INFRASTRUCTURE_SIZE); if (tagsLen > 0) { // There are some Tag bytes in the byte[]. 
So reduce 2 bytes which is added to denote the tags // length @@ -2765,6 +2785,10 @@ public class KeyValue implements Cell, HeapSize, Cloneable, SettableSequenceId, return getKeyOffset() + Bytes.SIZEOF_SHORT; } + public int getRowOffset(final int koffset) { + return koffset + Bytes.SIZEOF_SHORT; + } + @Override public byte[] getFamilyArray() { return bytes; @@ -2807,11 +2831,11 @@ public class KeyValue implements Cell, HeapSize, Cloneable, SettableSequenceId, @Override public byte getTypeByte() { - return this.bytes[this.offset + getKeyLength() - 1]; + return getTypeByte(getKeyLength()); } - private int getQualifierLength(int rlength, int flength) { - return getKeyLength() - (int) getKeyDataStructureSize(rlength, flength, 0); + public byte getTypeByte(final int klength) { + return this.bytes[this.offset + klength - 1]; } @Override diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java index dd666e6..eceb1a0 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java @@ -53,6 +53,7 @@ import sun.misc.Unsafe; import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.Lists; + import org.apache.hadoop.hbase.util.Bytes.LexicographicalComparerHolder.UnsafeComparer; /** @@ -60,6 +61,7 @@ import org.apache.hadoop.hbase.util.Bytes.LexicographicalComparerHolder.UnsafeCo * comparisons, hash code generation, manufacturing keys for HashMaps or * HashSets, etc. */ +@SuppressWarnings("restriction") @InterfaceAudience.Public @InterfaceStability.Stable public class Bytes { @@ -125,7 +127,12 @@ public class Bytes { // SizeOf which uses java.lang.instrument says 24 bytes. (3 longs?) public static final int ESTIMATED_HEAP_TAX = 16; - + /** + * Mask to apply to a long to reveal the lower int only. 
Use like this: + * int i = (int)(0xFFFFFFFF00000000l ^ some_long_value); + */ + public static final long MASK_FOR_LOWER_INT_IN_LONG = 0xFFFFFFFF00000000l; + /** * Returns length of the byte array, returning 0 if the array is null. * Useful for calculating sizes. diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java index b8e4640..1070fa0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java @@ -39,7 +39,6 @@ import org.apache.hadoop.hbase.io.encoding.DataBlockEncoder; import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; import org.apache.hadoop.hbase.io.encoding.HFileBlockDecodingContext; import org.apache.hadoop.hbase.io.hfile.HFile.FileInfo; -import org.apache.hadoop.hbase.util.ByteBufferUtils; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.IdLock; import org.apache.hadoop.io.WritableUtils; @@ -790,16 +789,9 @@ public class HFileReaderV2 extends AbstractHFileReader { } /** - * Go to the next key/value in the block section. Loads the next block if - * necessary. If successful, {@link #getKey()} and {@link #getValue()} can - * be called. - * - * @return true if successfully navigated to the next key/value + * Set the position on current backing blockBuffer. 
*/ - @Override - public boolean next() throws IOException { - assertSeeked(); - + private void positionThisBlockBuffer() { try { blockBuffer.position(getNextCellStartPosition()); } catch (IllegalArgumentException e) { @@ -810,32 +802,60 @@ public class HFileReaderV2 extends AbstractHFileReader { + "; currBlock currBlockOffset = " + block.getOffset()); throw e; } + } - if (blockBuffer.remaining() <= 0) { - long lastDataBlockOffset = - reader.getTrailer().getLastDataBlockOffset(); - - if (block.getOffset() >= lastDataBlockOffset) { - setNonSeekedState(); - return false; - } - - // read the next block - HFileBlock nextBlock = readNextDataBlock(); - if (nextBlock == null) { - setNonSeekedState(); - return false; - } + /** + * Set ourselves up for the next 'next' invocation, set up next block. + * @return True if more to read else false if at the end. + * @throws IOException + */ + private boolean positionForNextBlock() throws IOException { + // Methods are small so they get inlined because they are 'hot'. + long lastDataBlockOffset = reader.getTrailer().getLastDataBlockOffset(); + if (block.getOffset() >= lastDataBlockOffset) { + setNonSeekedState(); + return false; + } + return isNextBlock(); + } - updateCurrBlock(nextBlock); - return true; + private boolean isNextBlock() throws IOException { + // Methods are small so they get inlined because they are 'hot'. + HFileBlock nextBlock = readNextDataBlock(); + if (nextBlock == null) { + setNonSeekedState(); + return false; } + updateCurrBlock(nextBlock); + return true; + } + private final boolean _next() throws IOException { + // Small method so can be inlined. It is a hot one. + if (blockBuffer.remaining() <= 0) { + return positionForNextBlock(); + } // We are still in the same block. readKeyValueLen(); return true; } + /** + * Go to the next key/value in the block section. Loads the next block if + * necessary. If successful, {@link #getKey()} and {@link #getValue()} can + * be called. 
+ * + * @return true if successfully navigated to the next key/value + */ + @Override + public boolean next() throws IOException { + // This is a hot method so extreme measures taken to ensure it is small and inlineable. + // Checked by setting: -XX:+UnlockDiagnosticVMOptions -XX:+PrintInlining -XX:+PrintCompilation + assertSeeked(); + positionThisBlockBuffer(); + return _next(); + } + protected int getNextCellStartPosition() { return blockBuffer.position() + KEY_VALUE_LEN_SIZE + currKeyLen + currValueLen + currMemstoreTSLen; @@ -916,38 +936,72 @@ public class HFileReaderV2 extends AbstractHFileReader { this.nextIndexedKey = null; } + /** + * @param v + * @return True if v < 0 or v > current block buffer limit. + */ + protected final boolean checkLen(final int v) { + return v < 0 || v > this.blockBuffer.limit(); + } + + protected final void checkKeyValueLen() { + if (checkLen(this.currKeyLen) || checkLen(this.currValueLen)) { + throw new IllegalStateException("Invalid currKeyLen " + this.currKeyLen + + " or currValueLen " + this.currValueLen + ". Block offset: " + block.getOffset() + + ", block length: " + this.blockBuffer.limit() + ", position: " + + this.blockBuffer.position() + " (without header)."); + } + } + protected void readKeyValueLen() { - blockBuffer.mark(); - currKeyLen = blockBuffer.getInt(); - currValueLen = blockBuffer.getInt(); - ByteBufferUtils.skip(blockBuffer, currKeyLen + currValueLen); - readMvccVersion(); - if (currKeyLen < 0 || currValueLen < 0 - || currKeyLen > blockBuffer.limit() - || currValueLen > blockBuffer.limit()) { - throw new IllegalStateException("Invalid currKeyLen " + currKeyLen - + " or currValueLen " + currValueLen + ". Block offset: " - + block.getOffset() + ", block length: " + blockBuffer.limit() - + ", position: " + blockBuffer.position() + " (without header)."); + // TODO: METHOD DUPLICATED IN V3!!!!! FIX + // This is a hot method. 
We go out of our way to make this method short so it can be + // inlined and is not too big to compile. We also manage position in ByteBuffer ourselves + // because it is faster than going via range-checked ByteBuffer methods. + int p = blockBuffer.position() + blockBuffer.arrayOffset(); + // Get a long at a time rather than read two individual ints. In micro-benchmarking, even + // with the extra bit-fiddling, this is order-of-magnitude faster than getting two ints. + long ll = Bytes.toLong(blockBuffer.array(), p); + // Read top half as an int of key length and bottom int as value length + this.currKeyLen = (int)(ll >> Integer.SIZE); + this.currValueLen = (int)(Bytes.MASK_FOR_LOWER_INT_IN_LONG ^ ll); + checkKeyValueLen(); + // Move position past the key and value lengths and then beyond the key and value + p += (Bytes.SIZEOF_LONG + currKeyLen + currValueLen); + readMvccVersion(p); + } + + protected void readMvccVersion(final int position) { + if (!this.reader.shouldIncludeMemstoreTS()) return; + if (!this.reader.decodeMemstoreTS) { + currMemstoreTS = 0; + currMemstoreTSLen = 1; + return; } - blockBuffer.reset(); + _readMvccVersion(position); } - protected void readMvccVersion() { - if (this.reader.shouldIncludeMemstoreTS()) { - if (this.reader.decodeMemstoreTS) { - try { - currMemstoreTS = Bytes.readVLong(blockBuffer.array(), blockBuffer.arrayOffset() - + blockBuffer.position()); - currMemstoreTSLen = WritableUtils.getVIntSize(currMemstoreTS); - } catch (Exception e) { - throw new RuntimeException("Error reading memstore timestamp", e); - } - } else { - currMemstoreTS = 0; - currMemstoreTSLen = 1; + private void _readMvccVersion(final int position) { + // This is Bytes#bytesToVint inlined so can save a few instructions in this hot method. 
+ byte firstByte = blockBuffer.array()[position]; + int len = WritableUtils.decodeVIntSize(firstByte); + if (len == 1) { + this.currMemstoreTS = firstByte; + } else { + long i = 0; + for (int idx = 0; idx < len - 1; idx++) { + byte b = blockBuffer.array()[position + 1 + idx]; + i = i << 8; + i = i | (b & 0xFF); } + currMemstoreTS = (WritableUtils.isNegativeVInt(firstByte) ? ~i : i); } + this.currMemstoreTSLen = len; + } + + protected void readMvccVersion() { + // TODO CLEANUP!!! + readMvccVersion(blockBuffer.arrayOffset() + blockBuffer.position()); } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV3.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV3.java index b28d8c1..3a2ae7c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV3.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV3.java @@ -203,30 +203,35 @@ public class HFileReaderV3 extends HFileReaderV2 { return nextKvPos; } - protected void readKeyValueLen() { - blockBuffer.mark(); - currKeyLen = blockBuffer.getInt(); - currValueLen = blockBuffer.getInt(); - if (currKeyLen < 0 || currValueLen < 0 || currKeyLen > blockBuffer.limit() - || currValueLen > blockBuffer.limit()) { - throw new IllegalStateException("Invalid currKeyLen " + currKeyLen + " or currValueLen " - + currValueLen + ". Block offset: " - + block.getOffset() + ", block length: " + blockBuffer.limit() + ", position: " - + blockBuffer.position() + " (without header)."); + private final void checkTagsLen() { + if (checkLen(this.currTagsLen)) { + throw new IllegalStateException("Invalid currTagsLen " + this.currTagsLen + + ". 
Block offset: " + block.getOffset() + ", block length: " + this.blockBuffer.limit() + + ", position: " + this.blockBuffer.position() + " (without header)."); } - ByteBufferUtils.skip(blockBuffer, currKeyLen + currValueLen); + } + + protected final void readKeyValueLen() { + // This is a hot method. We go out of our way to make this method short so it can be + // inlined and is not too big to compile. We also manage position in ByteBuffer ourselves + // because it is faster than going via range-checked ByteBuffer methods. + int p = blockBuffer.position() + blockBuffer.arrayOffset(); + // Get a long at a time rather than read two individual ints. In micro-benchmarking, even + // with the extra bit-fiddling, this is order-of-magnitude faster than getting two ints. + long ll = Bytes.toLong(blockBuffer.array(), p); + // Read top half as an int of key length and bottom int as value length + this.currKeyLen = (int)(ll >> Integer.SIZE); + this.currValueLen = (int)(Bytes.MASK_FOR_LOWER_INT_IN_LONG ^ ll); + checkKeyValueLen(); + // Move position past the key and value lengths and then beyond the key and value + p += (Bytes.SIZEOF_LONG + currKeyLen + currValueLen); if (reader.hfileContext.isIncludesTags()) { - // Read short as unsigned, high byte first - currTagsLen = ((blockBuffer.get() & 0xff) << 8) ^ (blockBuffer.get() & 0xff); - if (currTagsLen < 0 || currTagsLen > blockBuffer.limit()) { - throw new IllegalStateException("Invalid currTagsLen " + currTagsLen + ". Block offset: " - + block.getOffset() + ", block length: " + blockBuffer.limit() + ", position: " - + blockBuffer.position() + " (without header)."); - } - ByteBufferUtils.skip(blockBuffer, currTagsLen); + // Tags length is a short. 
+ this.currTagsLen = Bytes.toShort(blockBuffer.array(), p); + checkTagsLen(); + p += (Bytes.SIZEOF_SHORT + currTagsLen); } - readMvccVersion(); - blockBuffer.reset(); + readMvccVersion(p); } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java index 8179499..877af04 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java @@ -1692,8 +1692,8 @@ public class HStore implements Store { * @return true if the cell is expired */ static boolean isCellTTLExpired(final Cell cell, final long oldestTimestamp, final long now) { - // Do not create an Iterator or Tag objects unless the cell actually has - // tags + // Do not create an Iterator or Tag objects unless the cell actually has tags. + // TODO: This check for tags is really expensive. We decode an int for key and value. Costs. if (cell.getTagsLength() > 0) { // Look for a TTL tag first. Use it instead of the family setting if // found. 
If a cell has multiple TTLs, resolve the conflict by using the diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java index df01875..fec2a45 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java @@ -20,7 +20,6 @@ package org.apache.hadoop.hbase.regionserver; import java.io.IOException; import java.io.InterruptedIOException; -import java.lang.Math; import java.lang.annotation.Retention; import java.lang.annotation.RetentionPolicy; import java.net.InetSocketAddress; @@ -74,7 +73,6 @@ import org.apache.hadoop.hbase.exceptions.OperationConflictException; import org.apache.hadoop.hbase.exceptions.OutOfOrderScannerNextException; import org.apache.hadoop.hbase.filter.ByteArrayComparable; import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp; -import org.apache.hadoop.hbase.io.hfile.HFile; import org.apache.hadoop.hbase.ipc.HBaseRPCErrorHandler; import org.apache.hadoop.hbase.ipc.PayloadCarryingRpcController; import org.apache.hadoop.hbase.ipc.PriorityFunction; @@ -151,16 +149,14 @@ import org.apache.hadoop.hbase.regionserver.HRegion.Operation; import org.apache.hadoop.hbase.regionserver.Leases.LeaseStillHeldException; import org.apache.hadoop.hbase.regionserver.handler.OpenMetaHandler; import org.apache.hadoop.hbase.regionserver.handler.OpenRegionHandler; -import org.apache.hadoop.hbase.wal.WAL; -import org.apache.hadoop.hbase.wal.WALKey; -import org.apache.hadoop.hbase.wal.WALSplitter; import org.apache.hadoop.hbase.regionserver.wal.WALEdit; -import org.apache.hadoop.hbase.util.ByteStringer; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Counter; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.Pair; import 
org.apache.hadoop.hbase.util.Strings; +import org.apache.hadoop.hbase.wal.WALKey; +import org.apache.hadoop.hbase.wal.WALSplitter; import org.apache.hadoop.hbase.zookeeper.ZKSplitLog; import org.apache.hadoop.net.DNS; import org.apache.zookeeper.KeeperException; @@ -2085,7 +2081,10 @@ public class RSRpcServices implements HBaseRPCErrorHandler, if (maxResultSize <= 0) { maxResultSize = maxScannerResultSize; } - List values = new ArrayList(); + // This is cells inside a row. Default size is 10 so if many versions or many cfs, + // then we'll resize. Resizings show in profiler. Set it higher than 10. For now + // arbitrary 32. TODO: keep record of general size of results being returned. + List values = new ArrayList(32); region.startRegionOperation(Operation.SCAN); try { int i = 0; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanQueryMatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanQueryMatcher.java index 032b4ce..6aebe62 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanQueryMatcher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanQueryMatcher.java @@ -278,8 +278,10 @@ public class ScanQueryMatcher { if (filter != null && filter.filterAllRemaining()) { return MatchCode.DONE_SCAN; } + int cellRowOffset = cell.getRowOffset(); + int cellRowLength = cell.getRowLength(); // Parse of a short. int ret = this.rowComparator.compareRows(row, this.rowOffset, this.rowLength, - cell.getRowArray(), cell.getRowOffset(), cell.getRowLength()); + cell.getRowArray(), cellRowOffset, cellRowLength); if (!this.isReversed) { if (ret <= -1) { return MatchCode.DONE; @@ -297,28 +299,62 @@ public class ScanQueryMatcher { } } - // optimize case. - if (this.stickyNextRow) - return MatchCode.SEEK_NEXT_ROW; + // Optimize case. 
+ if (this.stickyNextRow) return MatchCode.SEEK_NEXT_ROW; if (this.columns.done()) { stickyNextRow = true; return MatchCode.SEEK_NEXT_ROW; } - int qualifierOffset = cell.getQualifierOffset(); - int qualifierLength = cell.getQualifierLength(); + return moreMatching(cell, cellRowOffset, cellRowLength); + } - long timestamp = cell.getTimestamp(); - // check for early out based on timestamp alone - if (columns.isDone(timestamp)) { - return columns.getNextRowOrNextColumn(cell.getQualifierArray(), qualifierOffset, - qualifierLength); + private MatchCode moreMatching(final Cell cell, final int cellRowOffset, + final int cellRowLength) + throws IOException { + int qualifierOffset; + int qualifierLength; + long timestamp; + byte typeByte; + if (cell instanceof KeyValue) { + // Dirty tricks if cell is a KeyValue. Below ugliness is to save on our reparsing lengths of + // families, rows, and keys more than once. + KeyValue kv = (KeyValue)cell; + int cellFamilyOffset = kv.getFamilyOffset(cellRowLength); + qualifierOffset = kv.getQualifierOffset(cellFamilyOffset); + int cellKeyLength = kv.getKeyLength(); + qualifierLength = kv.getQualifierLength(cellKeyLength, cellRowLength, + kv.getFamilyLength(cellFamilyOffset)); + timestamp = kv.getTimestamp(cellKeyLength); + // check for early out based on timestamp alone + if (columns.isDone(timestamp)) { + return columns.getNextRowOrNextColumn(cell.getQualifierArray(), qualifierOffset, + qualifierLength); + } + // check if the cell is expired by cell TTL. Do cheaper tag check up here before calling + // isCellTTLExpired. Small savings. 
+ if (kv.getTagsLength(cellKeyLength) > 0) { + if (HStore.isCellTTLExpired(cell, this.oldestUnexpiredTS, this.now)) { + return MatchCode.SKIP; + } + } + typeByte = kv.getTypeByte(cellKeyLength); + } else { + qualifierOffset = cell.getQualifierOffset(); + qualifierLength = cell.getQualifierLength(); + timestamp = cell.getTimestamp(); + // check for early out based on timestamp alone + if (columns.isDone(timestamp)) { + return columns.getNextRowOrNextColumn(cell.getQualifierArray(), qualifierOffset, + qualifierLength); + } + // check if the cell is expired by cell TTL + if (HStore.isCellTTLExpired(cell, this.oldestUnexpiredTS, this.now)) { + return MatchCode.SKIP; + } + typeByte = cell.getTypeByte(); } - // check if the cell is expired by cell TTL - if (HStore.isCellTTLExpired(cell, this.oldestUnexpiredTS, this.now)) { - return MatchCode.SKIP; - } /* * The delete logic is pretty complicated now. @@ -333,8 +369,7 @@ public class ScanQueryMatcher { * 7. Delete marker need to be version counted together with puts * they affect */ - byte typeByte = cell.getTypeByte(); - long mvccVersion = cell.getMvccVersion(); + long mvccVersion = cell.getSequenceId(); if (CellUtil.isDelete(cell)) { if (keepDeletedCells == KeepDeletedCells.FALSE || (keepDeletedCells == KeepDeletedCells.TTL && timestamp < ttl)) {