Index: src/main/java/org/apache/hadoop/hbase/KeyValue.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/KeyValue.java (revision 1540556)
+++ src/main/java/org/apache/hadoop/hbase/KeyValue.java (working copy)
@@ -278,21 +278,6 @@
this.length = length;
}
- /**
- * Creates a KeyValue from the specified byte array, starting at offset,
- * for length length, and a known keyLength.
- * @param bytes byte array
- * @param offset offset to start of the KeyValue
- * @param length length of the KeyValue
- * @param keyLength length of the key portion of the KeyValue
- */
- public KeyValue(final byte [] bytes, final int offset, final int length, final int keyLength) {
- this.bytes = bytes;
- this.offset = offset;
- this.length = length;
- this.keyLength = keyLength;
- }
-
/** Constructors that build a new backing byte array from fields */
/**
@@ -834,13 +819,8 @@
/**
* @return Length of key portion.
*/
- private int keyLength = 0;
-
public int getKeyLength() {
- if (keyLength == 0) {
- keyLength = Bytes.toInt(this.bytes, this.offset);
- }
- return keyLength;
+ return Bytes.toInt(this.bytes, this.offset);
}
/**
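Note: with the cached field removed, getKeyLength() re-decodes the 4-byte length prefix on every call. A minimal sketch of what that decode amounts to, assuming the standard KeyValue wire layout (<4-byte key length><4-byte value length><key><value>); decodeKeyLength, buf and off are hypothetical stand-ins for Bytes.toInt and the KeyValue's backing array and offset:

  // Equivalent of Bytes.toInt(buf, off): a big-endian int from the first
  // four bytes of the serialized KeyValue, i.e. the key length prefix.
  static int decodeKeyLength(byte[] buf, int off) {
    return ((buf[off]     & 0xFF) << 24)
         | ((buf[off + 1] & 0xFF) << 16)
         | ((buf[off + 2] & 0xFF) << 8)
         |  (buf[off + 3] & 0xFF);
  }

The trade is a handful of arithmetic instructions per call against four bytes of heap on every KeyValue instance.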
@@ -2265,7 +2245,7 @@
public long heapSize() {
  return ClassSize.align(ClassSize.OBJECT + ClassSize.REFERENCE
      + ClassSize.align(ClassSize.ARRAY) + ClassSize.align(length)
-     + (3 * Bytes.SIZEOF_INT) + Bytes.SIZEOF_LONG);
+     + (2 * Bytes.SIZEOF_INT) + Bytes.SIZEOF_LONG);
}
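Note: the heapSize() change simply mirrors the field removal. A back-of-the-envelope check, assuming Bytes.SIZEOF_INT == 4, Bytes.SIZEOF_LONG == 8, and that the long term accounts for the memstoreTS field:

  // Before: three ints (offset, length, keyLength) plus the memstoreTS long.
  int fixedBefore = 3 * 4 + 8;  // 20 bytes
  // After: two ints (offset, length) plus the memstoreTS long.
  int fixedAfter  = 2 * 4 + 8;  // 16 bytes
  // Net: 4 bytes saved per KeyValue instance, before JVM alignment.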
// this overload assumes that the length bytes have already been read,
@@ -2274,7 +2254,6 @@
public void readFields(int length, final DataInput in) throws IOException {
this.length = length;
this.offset = 0;
- this.keyLength = 0;
this.bytes = new byte[this.length];
in.readFully(this.bytes, 0, this.length);
}
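Note: a hypothetical round-trip through the readFields(int, DataInput) overload above; per the comment, the caller consumes the leading length itself. Here 'serialized' is an assumed byte array holding a length-prefixed KeyValue, and the no-arg Writable constructor is assumed from the surrounding class:

  DataInputStream in =
      new DataInputStream(new ByteArrayInputStream(serialized));
  int totalLen = in.readInt();   // length prefix consumed by the caller
  KeyValue kv = new KeyValue();  // no-arg Writable constructor (assumed)
  kv.readFields(totalLen, in);   // nothing is cached, so no keyLength to reset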
Index: src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java (revision 1540556)
+++ src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java (working copy)
@@ -638,8 +638,7 @@
KeyValue ret = new KeyValue(blockBuffer.array(),
blockBuffer.arrayOffset() + blockBuffer.position(),
- KEY_VALUE_LEN_SIZE + currKeyLen + currValueLen,
- currKeyLen);
+ KEY_VALUE_LEN_SIZE + currKeyLen + currValueLen);
if (this.reader.shouldIncludeMemstoreTS()) {
ret.setMemstoreTS(currMemstoreTS);
}
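Note: the scanner keeps currKeyLen in a local for positioning, so dropping the fourth constructor argument loses no information; a later getKeyLength() call on the returned KeyValue re-reads the same prefix. A sketch of the equivalence, assuming KEY_VALUE_LEN_SIZE is the two 4-byte length prefixes and blockBuffer is positioned at the start of the current cell:

  // The key length the removed constructor used to cache...
  int cached = currKeyLen;
  // ...is exactly what getKeyLength() now decodes from the copied range:
  int decoded = Bytes.toInt(blockBuffer.array(),
      blockBuffer.arrayOffset() + blockBuffer.position());
  assert cached == decoded;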
Index: src/main/java/org/apache/hadoop/hbase/regionserver/ScanQueryMatcher.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/regionserver/ScanQueryMatcher.java (revision 1540556)
+++ src/main/java/org/apache/hadoop/hbase/regionserver/ScanQueryMatcher.java (working copy)
@@ -219,11 +219,12 @@
byte [] bytes = kv.getBuffer();
int offset = kv.getOffset();
- int initialOffset = offset;
int keyLength = Bytes.toInt(bytes, offset, Bytes.SIZEOF_INT);
offset += KeyValue.ROW_OFFSET;
+ int initialOffset = offset;
+
short rowLength = Bytes.toShort(bytes, offset, Bytes.SIZEOF_SHORT);
offset += Bytes.SIZEOF_SHORT;
@@ -254,10 +255,10 @@
byte familyLength = bytes [offset];
offset += familyLength + 1;
- int qualLength = keyLength + KeyValue.ROW_OFFSET -
+ int qualLength = keyLength -
(offset - initialOffset) - KeyValue.TIMESTAMP_TYPE_SIZE;
- long timestamp = kv.getTimestamp();
+ long timestamp = Bytes.toLong(bytes, initialOffset + keyLength - KeyValue.TIMESTAMP_TYPE_SIZE);
// check for early out based on timestamp alone
if (columns.isDone(timestamp)) {
return columns.getNextRowOrNextColumn(bytes, offset, qualLength);
@@ -276,7 +277,7 @@
* 7. Delete marker need to be version counted together with puts
* they affect
*/
- byte type = kv.getType();
+ byte type = bytes[initialOffset + keyLength - 1];
if (kv.isDelete()) {
if (!keepDeletedCells) {
// first ignore delete markers if the scanner can do so, and the
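Note: with getKeyLength() no longer cached, each kv.getTimestamp() or kv.getType() call would decode the length prefix again, so the matcher now reads both fields with raw offset arithmetic; moving initialOffset past ROW_OFFSET lets keyLength be used directly against the fixed tail. A self-contained sketch of the walk, assuming the standard key layout and the constants KeyValue.ROW_OFFSET == 8 and KeyValue.TIMESTAMP_TYPE_SIZE == 9 (walkKey itself is illustrative):

  import org.apache.hadoop.hbase.util.Bytes;

  // Walks <2B row len><row><1B family len><family><qualifier><8B ts><1B type>
  // the way the patched matcher does, returning the timestamp.
  static long walkKey(byte[] bytes, int kvOffset) {
    int keyLength = Bytes.toInt(bytes, kvOffset);   // 4-byte key length prefix
    int initialOffset = kvOffset + 8;               // skip the two length ints
    int offset = initialOffset;

    short rowLength = Bytes.toShort(bytes, offset);
    offset += 2 + rowLength;                        // row-length short + row bytes

    byte familyLength = bytes[offset];
    offset += 1 + familyLength;                     // family-length byte + family bytes

    // Qualifier is whatever remains before the fixed 9-byte tail.
    int qualLength = keyLength - (offset - initialOffset) - 9;

    byte type = bytes[initialOffset + keyLength - 1];          // last key byte
    return Bytes.toLong(bytes, initialOffset + keyLength - 9); // timestamp
  }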