diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/BigDecimalColumnInterpreter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/BigDecimalColumnInterpreter.java index 97724bd..f4b12eb 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/BigDecimalColumnInterpreter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/BigDecimalColumnInterpreter.java @@ -46,8 +46,7 @@ public class BigDecimalColumnInterpreter extends ColumnInterpreter= limit) { return ReturnCode.NEXT_ROW; } + // TODO have a method CellComparator.compareQualifier(Cell, byte[]) byte[] buffer = v.getQualifierArray(); if (buffer == null) { return ReturnCode.SEEK_NEXT_USING_HINT; @@ -145,9 +147,7 @@ public class ColumnPaginationFilter extends FilterBase @Override public Cell getNextCellHint(Cell cell) { - return KeyValueUtil.createFirstOnRow( - cell.getRowArray(), cell.getRowOffset(), cell.getRowLength(), cell.getFamilyArray(), - cell.getFamilyOffset(), cell.getFamilyLength(), columnOffset, 0, columnOffset.length); + return CellUtil.createFirstOnRowColumn(cell, columnOffset); } @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnPrefixFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnPrefixFilter.java index d2f058a..b753c92 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnPrefixFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnPrefixFilter.java @@ -22,6 +22,7 @@ package org.apache.hadoop.hbase.filter; import java.util.ArrayList; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; @@ -53,16 +54,18 @@ public class ColumnPrefixFilter extends FilterBase { @Override public 
ReturnCode filterKeyValue(Cell kv) { - if (this.prefix == null || kv.getQualifierArray() == null) { + if (this.prefix == null) { return ReturnCode.INCLUDE; } else { - return filterColumn(kv.getQualifierArray(), kv.getQualifierOffset(), kv.getQualifierLength()); + return filterColumn(kv); } } - public ReturnCode filterColumn(byte[] buffer, int qualifierOffset, int qualifierLength) { + public ReturnCode filterColumn(Cell kv) { + int qualifierLength = kv.getQualifierLength(); + // TODO have a method CellComparator.compareQualifier(Cell, byte[]) if (qualifierLength < prefix.length) { - int cmp = Bytes.compareTo(buffer, qualifierOffset, qualifierLength, this.prefix, 0, + int cmp = Bytes.compareTo(kv.getQualifierArray(), kv.getQualifierOffset(), qualifierLength, this.prefix, 0, qualifierLength); if (cmp <= 0) { return ReturnCode.SEEK_NEXT_USING_HINT; @@ -70,7 +73,7 @@ public class ColumnPrefixFilter extends FilterBase { return ReturnCode.NEXT_ROW; } } else { - int cmp = Bytes.compareTo(buffer, qualifierOffset, this.prefix.length, this.prefix, 0, + int cmp = Bytes.compareTo(kv.getQualifierArray(), kv.getQualifierOffset(), this.prefix.length, this.prefix, 0, this.prefix.length); if (cmp < 0) { return ReturnCode.SEEK_NEXT_USING_HINT; @@ -131,9 +134,7 @@ public class ColumnPrefixFilter extends FilterBase { @Override public Cell getNextCellHint(Cell cell) { - return KeyValueUtil.createFirstOnRow( - cell.getRowArray(), cell.getRowOffset(), cell.getRowLength(), cell.getFamilyArray(), - cell.getFamilyOffset(), cell.getFamilyLength(), prefix, 0, prefix.length); + return CellUtil.createFirstOnRowColumn(cell, prefix); } @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnRangeFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnRangeFilter.java index d8ea094..1116860 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnRangeFilter.java +++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnRangeFilter.java @@ -24,6 +24,7 @@ import static org.apache.hadoop.hbase.util.Bytes.len; import java.util.ArrayList; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; @@ -123,6 +124,7 @@ public class ColumnRangeFilter extends FilterBase { int qualifierLength = kv.getQualifierLength(); int cmpMin = 1; + // TODO have a method CellComparator.compareQualifier(Cell, byte[]) if (this.minColumn != null) { cmpMin = Bytes.compareTo(buffer, qualifierOffset, qualifierLength, this.minColumn, 0, this.minColumn.length); @@ -217,9 +219,7 @@ public class ColumnRangeFilter extends FilterBase { @Override public Cell getNextCellHint(Cell cell) { - return KeyValueUtil.createFirstOnRow(cell.getRowArray(), cell.getRowOffset(), cell - .getRowLength(), cell.getFamilyArray(), cell.getFamilyOffset(), cell - .getFamilyLength(), this.minColumn, 0, len(this.minColumn)); + return CellUtil.createFirstOnRowColumn(cell, minColumn); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FamilyFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FamilyFilter.java index e79a4d5..4923f60 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FamilyFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FamilyFilter.java @@ -63,6 +63,8 @@ public class FamilyFilter extends CompareFilter { public ReturnCode filterKeyValue(Cell v) { int familyLength = v.getFamilyLength(); if (familyLength > 0) { + // TODO unconditional call to getFamilyArray. Solve + // We would need comparator.compareTo(BB, int, int)? 
if (doCompare(this.compareOp, this.comparator, v.getFamilyArray(), v.getFamilyOffset(), familyLength)) { return ReturnCode.NEXT_ROW; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/Filter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/Filter.java index 88bf842..5a78313 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/Filter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/Filter.java @@ -78,9 +78,13 @@ public abstract class Filter { * @param length length of the row key * @return true, remove entire row, false, include the row (maybe). * @throws IOException in case an I/O or an filter specific failure needs to be signaled. + * @deprecated Instead use {@link #filterRowKey(Cell)} */ + @Deprecated abstract public boolean filterRowKey(byte[] buffer, int offset, int length) throws IOException; + abstract public boolean filterRowKey(Cell firstRowCell) throws IOException; + /** * If this returns true, the scan will terminate. * diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterBase.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterBase.java index a04dd89..1004fa2 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterBase.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterBase.java @@ -53,11 +53,17 @@ public abstract class FilterBase extends Filter { * * @inheritDoc */ + @Deprecated @Override public boolean filterRowKey(byte[] buffer, int offset, int length) throws IOException { return false; } + @Override + public boolean filterRowKey(Cell cell) throws IOException { + return filterRowKey(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength()); + } + /** * Filters that never filter all remaining can inherit this implementation that * never stops the filter early. 
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterList.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterList.java index ba1a818..5e5269f 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterList.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterList.java @@ -198,6 +198,27 @@ final public class FilterList extends Filter { } @Override + public boolean filterRowKey(Cell firstRowCell) throws IOException { + boolean flag = (this.operator == Operator.MUST_PASS_ONE) ? true : false; + int listize = filters.size(); + for (int i = 0; i < listize; i++) { + Filter filter = filters.get(i); + if (this.operator == Operator.MUST_PASS_ALL) { + if (filter.filterAllRemaining() || + filter.filterRowKey(firstRowCell)) { + flag = true; + } + } else if (this.operator == Operator.MUST_PASS_ONE) { + if (!filter.filterAllRemaining() && + !filter.filterRowKey(firstRowCell)) { + flag = false; + } + } + } + return flag; + } + + @Override public boolean filterAllRemaining() throws IOException { int listize = filters.size(); for (int i = 0; i < listize; i++) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterWrapper.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterWrapper.java index 5176115..fbe3170 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterWrapper.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterWrapper.java @@ -106,6 +106,11 @@ final public class FilterWrapper extends Filter { } @Override + public boolean filterRowKey(Cell cell) throws IOException { + return this.filter.filterRowKey(cell); + } + + @Override public ReturnCode filterKeyValue(Cell v) throws IOException { return this.filter.filterKeyValue(v); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/MultipleColumnPrefixFilter.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/MultipleColumnPrefixFilter.java index b7ec11a..0f5f1d1 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/MultipleColumnPrefixFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/MultipleColumnPrefixFilter.java @@ -18,12 +18,11 @@ package org.apache.hadoop.hbase.filter; import java.util.ArrayList; -import java.util.Arrays; import java.util.Comparator; import java.util.TreeSet; import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.KeyValueUtil; +import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.hbase.exceptions.DeserializationException; @@ -64,17 +63,17 @@ public class MultipleColumnPrefixFilter extends FilterBase { } @Override - public ReturnCode filterKeyValue(Cell kv) { - if (sortedPrefixes.size() == 0 || kv.getQualifierArray() == null) { + public ReturnCode filterKeyValue(Cell cell) { + if (sortedPrefixes.size() == 0) { return ReturnCode.INCLUDE; } else { - return filterColumn(kv.getQualifierArray(), kv.getQualifierOffset(), kv.getQualifierLength()); + return filterColumn(cell); } } - public ReturnCode filterColumn(byte[] buffer, int qualifierOffset, int qualifierLength) { - byte [] qualifier = Arrays.copyOfRange(buffer, qualifierOffset, - qualifierLength + qualifierOffset); + public ReturnCode filterColumn(Cell cell) { + byte [] qualifier = new byte[cell.getQualifierLength()]; + CellUtil.copyQualifierTo(cell, qualifier, 0); TreeSet lesserOrEqualPrefixes = (TreeSet) sortedPrefixes.headSet(qualifier, true); @@ -156,9 +155,7 @@ public class MultipleColumnPrefixFilter extends FilterBase { @Override public Cell getNextCellHint(Cell cell) { - return KeyValueUtil.createFirstOnRow( - cell.getRowArray(), cell.getRowOffset(), cell.getRowLength(), cell.getFamilyArray(), - cell.getFamilyOffset(), 
cell.getFamilyLength(), hint, 0, hint.length); + return CellUtil.createFirstOnRowColumn(cell, hint); } public TreeSet createTreeSet() { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueFilter.java index d905868..50f2f4d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueFilter.java @@ -182,17 +182,17 @@ public class SingleColumnValueFilter extends FilterBase { return ReturnCode.INCLUDE; } foundColumn = true; - if (filterColumnValue(c.getValueArray(), - c.getValueOffset(), c.getValueLength())) { + if (filterColumnValue(c)) { return this.latestVersionOnly? ReturnCode.NEXT_ROW: ReturnCode.INCLUDE; } this.matchedColumn = true; return ReturnCode.INCLUDE; } - private boolean filterColumnValue(final byte [] data, final int offset, - final int length) { - int compareResult = this.comparator.compareTo(data, offset, length); + private boolean filterColumnValue(final Cell c) { + // TODO add a compareTo which takes BB in ByteArrayComparable? + // It will be strange? Check how can we avoid unconditional call to getValueArray. 
+ int compareResult = this.comparator.compareTo(c.getValueArray(), c.getValueOffset(), c.getValueLength()); switch (this.compareOp) { case LESS: return compareResult <= 0; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/WhileMatchFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/WhileMatchFilter.java index 31d4f77..e75ca49 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/WhileMatchFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/WhileMatchFilter.java @@ -73,6 +73,13 @@ public class WhileMatchFilter extends FilterBase { } @Override + public boolean filterRowKey(Cell cell) throws IOException { + boolean value = filter.filterRowKey(cell); + changeFAR(value); + return value; + } + + @Override public ReturnCode filterKeyValue(Cell v) throws IOException { ReturnCode c = filter.filterKeyValue(v); changeFAR(c != ReturnCode.INCLUDE); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/TablePermission.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/TablePermission.java index e4758b0..580dc03 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/TablePermission.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/TablePermission.java @@ -24,6 +24,7 @@ import java.io.IOException; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.classification.InterfaceAudience; @@ -249,14 +250,12 @@ public class TablePermission extends Permission { } if (family != null && - (Bytes.compareTo(family, 0, family.length, - kv.getFamilyArray(), kv.getFamilyOffset(), kv.getFamilyLength()) != 0)) { + !(CellUtil.matchingFamily(kv,family, 0, family.length))) { return false; } if (qualifier != null && - 
(Bytes.compareTo(qualifier, 0, qualifier.length, - kv.getQualifierArray(), kv.getQualifierOffset(), kv.getQualifierLength()) != 0)) { + !(CellUtil.matchingQualifier(kv, qualifier))) { return false; } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/BufferedCell.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/BufferedCell.java new file mode 100644 index 0000000..7d7ad37 --- /dev/null +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/BufferedCell.java @@ -0,0 +1,47 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase; + +import java.nio.ByteBuffer; + +import org.apache.hadoop.hbase.classification.InterfaceAudience; + +@InterfaceAudience.Private +public interface BufferedCell extends Cell { // TODO better name? 
+ + ByteBuffer getRowBuffer(); + + int getRowPositionInBuffer();// TODO better name + + ByteBuffer getFamilyBuffer(); + + int getFamilyPositionInBuffer(); + + ByteBuffer getQualifierBuffer(); + + int getQualifierPositionInBuffer(); + + ByteBuffer getValueBuffer(); + + int getValuePositionInBuffer(); + + ByteBuffer getTagsBuffer(); + + int getTagsPositionInBuffer(); + +} diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparator.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparator.java index 540c967..a56bd0f 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparator.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparator.java @@ -24,6 +24,7 @@ import java.util.Comparator; import org.apache.hadoop.hbase.KeyValue.Type; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.util.ByteBufferUtils; import org.apache.hadoop.hbase.util.Bytes; import com.google.common.primitives.Longs; @@ -116,22 +117,19 @@ public class CellComparator implements Comparator, Serializable { && equalsType(a, b); } + // TODO we can remove this and HBASE-10800 will do that I guess. public static boolean equalsRow(Cell a, Cell b){ - return Bytes.equals( - a.getRowArray(), a.getRowOffset(), a.getRowLength(), - b.getRowArray(), b.getRowOffset(), b.getRowLength()); + return CellUtil.matchingRow(a, b); } - public static boolean equalsFamily(Cell a, Cell b){ - return Bytes.equals( - a.getFamilyArray(), a.getFamilyOffset(), a.getFamilyLength(), - b.getFamilyArray(), b.getFamilyOffset(), b.getFamilyLength()); + // TODO we can remove this and HBASE-10800 will do that I guess. 
+ public static boolean equalsFamily(Cell a, Cell b) { + return CellUtil.matchingFamily(a, b); } - public static boolean equalsQualifier(Cell a, Cell b){ - return Bytes.equals( - a.getQualifierArray(), a.getQualifierOffset(), a.getQualifierLength(), - b.getQualifierArray(), b.getQualifierOffset(), b.getQualifierLength()); + // TODO we can remove this and HBASE-10800 will do that I guess. + public static boolean equalsQualifier(Cell a, Cell b) { + return CellUtil.matchingQualifier(a, b); } public static boolean equalsTimestamp(Cell a, Cell b){ @@ -160,11 +158,46 @@ public class CellComparator implements Comparator, Serializable { } public static int compareFamilies(Cell left, Cell right) { + boolean rightIsBufferedCell = (right instanceof BufferedCell); + if (left instanceof BufferedCell) { + BufferedCell b1 = (BufferedCell) left; + if (rightIsBufferedCell) { + BufferedCell b2 = (BufferedCell) right; + return ByteBufferUtils.compareTo(b1.getFamilyBuffer(), b1.getFamilyPositionInBuffer(), + b1.getFamilyLength(), b2.getFamilyBuffer(), b2.getFamilyPositionInBuffer(), b2.getFamilyLength()); + } else { + return ByteBufferUtils.compareTo(b1.getFamilyBuffer(), b1.getFamilyPositionInBuffer(), + b1.getFamilyLength(), right.getFamilyArray(), right.getFamilyOffset(), right.getFamilyLength()); + } + } else if (rightIsBufferedCell) { + BufferedCell b2 = (BufferedCell) right; + return -(ByteBufferUtils.compareTo(b2.getFamilyBuffer(), b2.getFamilyPositionInBuffer(), + b2.getFamilyLength(), left.getFamilyArray(), left.getFamilyOffset(), left.getFamilyLength())); + } return Bytes.compareTo(left.getFamilyArray(), left.getFamilyOffset(), left.getFamilyLength(), right.getFamilyArray(), right.getFamilyOffset(), right.getFamilyLength()); } public static int compareQualifiers(Cell left, Cell right) { + boolean rightIsBufferedCell = (right instanceof BufferedCell); + if (left instanceof BufferedCell) { + BufferedCell b1 = (BufferedCell) left; + if (rightIsBufferedCell) { + BufferedCell b2 
= (BufferedCell) right; + return ByteBufferUtils.compareTo(b1.getQualifierBuffer(), + b1.getQualifierPositionInBuffer(), b1.getQualifierLength(), b2.getQualifierBuffer(), + b2.getQualifierPositionInBuffer(), b2.getQualifierLength()); + } else { + return ByteBufferUtils.compareTo(b1.getQualifierBuffer(), + b1.getQualifierPositionInBuffer(), b1.getQualifierLength(), right.getQualifierArray(), + right.getQualifierOffset(), right.getQualifierLength()); + } + } else if (rightIsBufferedCell) { + BufferedCell b2 = (BufferedCell) right; + return -(ByteBufferUtils.compareTo(b2.getQualifierBuffer(), + b2.getQualifierPositionInBuffer(), b2.getQualifierLength(), left.getQualifierArray(), + left.getQualifierOffset(), left.getQualifierLength())); + } return Bytes.compareTo(left.getQualifierArray(), left.getQualifierOffset(), left.getQualifierLength(), right.getQualifierArray(), right.getQualifierOffset(), right.getQualifierLength()); @@ -183,6 +216,22 @@ public class CellComparator implements Comparator, Serializable { * so can't be treated as plain byte arrays as this method does. 
*/ public static int compareRows(final Cell left, final Cell right) { + boolean rightIsBufferedCell = (right instanceof BufferedCell); + if (left instanceof BufferedCell) { + BufferedCell b1 = (BufferedCell) left; + if (rightIsBufferedCell) { + BufferedCell b2 = (BufferedCell) right; + return ByteBufferUtils.compareTo(b1.getRowBuffer(), b1.getRowPositionInBuffer(), + b1.getRowLength(), b2.getRowBuffer(), b2.getRowPositionInBuffer(), b2.getRowLength()); + } else { + return ByteBufferUtils.compareTo(b1.getRowBuffer(), b1.getRowPositionInBuffer(), + b1.getRowLength(), right.getRowArray(), right.getRowOffset(), right.getRowLength()); + } + } else if (rightIsBufferedCell) { + BufferedCell b2 = (BufferedCell) right; + return -(ByteBufferUtils.compareTo(b2.getRowBuffer(), b2.getRowPositionInBuffer(), + b2.getRowLength(), left.getRowArray(), left.getRowOffset(), left.getRowLength())); + } return Bytes.compareTo(left.getRowArray(), left.getRowOffset(), left.getRowLength(), right.getRowArray(), right.getRowOffset(), right.getRowLength()); } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java index bce3957..119216a 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java @@ -20,6 +20,9 @@ package org.apache.hadoop.hbase; import java.io.DataOutputStream; import java.io.IOException; +import java.io.OutputStream; +import java.math.BigDecimal; +import java.math.RoundingMode; import java.nio.ByteBuffer; import java.util.Iterator; import java.util.List; @@ -112,24 +115,42 @@ public final class CellUtil { /******************** copyTo **********************************/ public static int copyRowTo(Cell cell, byte[] destination, int destinationOffset) { - System.arraycopy(cell.getRowArray(), cell.getRowOffset(), destination, destinationOffset, - cell.getRowLength()); + if (cell instanceof BufferedCell) 
{ + ByteBufferUtils.copyFromBufferToArray(destination, ((BufferedCell) cell).getRowBuffer(), + ((BufferedCell) cell).getRowPositionInBuffer(), destinationOffset, cell.getRowLength()); + } else { + System.arraycopy(cell.getRowArray(), cell.getRowOffset(), destination, destinationOffset, + cell.getRowLength()); + } return destinationOffset + cell.getRowLength(); } public static int copyFamilyTo(Cell cell, byte[] destination, int destinationOffset) { - System.arraycopy(cell.getFamilyArray(), cell.getFamilyOffset(), destination, destinationOffset, - cell.getFamilyLength()); + if (cell instanceof BufferedCell) { + ByteBufferUtils.copyFromBufferToArray(destination, ((BufferedCell) cell).getFamilyBuffer(), + ((BufferedCell) cell).getFamilyPositionInBuffer(), destinationOffset, cell.getFamilyLength()); + } else { + System.arraycopy(cell.getFamilyArray(), cell.getFamilyOffset(), destination, destinationOffset, + cell.getFamilyLength()); + } return destinationOffset + cell.getFamilyLength(); } public static int copyQualifierTo(Cell cell, byte[] destination, int destinationOffset) { - System.arraycopy(cell.getQualifierArray(), cell.getQualifierOffset(), destination, - destinationOffset, cell.getQualifierLength()); + if (cell instanceof BufferedCell) { + ByteBufferUtils.copyFromBufferToArray(destination, + ((BufferedCell) cell).getQualifierBuffer(), + ((BufferedCell) cell).getQualifierPositionInBuffer(), destinationOffset, + cell.getQualifierLength()); + } else { + System.arraycopy(cell.getQualifierArray(), cell.getQualifierOffset(), destination, + destinationOffset, cell.getQualifierLength()); + } return destinationOffset + cell.getQualifierLength(); } public static int copyValueTo(Cell cell, byte[] destination, int destinationOffset) { + // TODO check for BufferedCell System.arraycopy(cell.getValueArray(), cell.getValueOffset(), destination, destinationOffset, cell.getValueLength()); return destinationOffset + cell.getValueLength(); @@ -143,6 +164,7 @@ public final class 
CellUtil { * @return position after tags */ public static int copyTagTo(Cell cell, byte[] destination, int destinationOffset) { + // TODO check for BufferedCell System.arraycopy(cell.getTagsArray(), cell.getTagsOffset(), destination, destinationOffset, cell.getTagsLength()); return destinationOffset + cell.getTagsLength(); @@ -151,6 +173,11 @@ public final class CellUtil { /********************* misc *************************************/ public static byte getRowByte(Cell cell, int index) { + if (cell instanceof BufferedCell) { + // TODO use BBUtils method for getByte from a BB when we add that. + ((BufferedCell) cell).getRowBuffer().get( + ((BufferedCell) cell).getRowPositionInBuffer() + index); + } return cell.getRowArray()[cell.getRowOffset() + index]; } @@ -374,38 +401,92 @@ public final class CellUtil { * @return True if the rows in left and right Cells match */ public static boolean matchingRow(final Cell left, final Cell right) { + if (left instanceof BufferedCell) { + BufferedCell b1 = (BufferedCell) left; + if (right instanceof BufferedCell) { + return ByteBufferUtils.equals(b1.getRowBuffer(), b1.getRowPositionInBuffer(), b1.getRowLength(), + ((BufferedCell) right).getRowBuffer(), ((BufferedCell) right).getRowPositionInBuffer(), + right.getRowLength()); + } else { + return ByteBufferUtils.equals(b1.getRowBuffer(), b1.getRowPositionInBuffer(), b1.getRowLength(), + right.getRowArray(), right.getRowOffset(), right.getRowLength()); + } + } else if (right instanceof BufferedCell) { + return ByteBufferUtils.equals(((BufferedCell) right).getRowBuffer(), + ((BufferedCell) right).getRowPositionInBuffer(), right.getRowLength(), + left.getRowArray(), left.getRowOffset(), left.getRowLength()); + } return Bytes.equals(left.getRowArray(), left.getRowOffset(), left.getRowLength(), right.getRowArray(), right.getRowOffset(), right.getRowLength()); } public static boolean matchingRow(final Cell left, final byte[] buf) { - return Bytes.equals(left.getRowArray(), 
left.getRowOffset(), left.getRowLength(), buf, 0, - buf.length); + return matchingRow(left, buf, 0, buf.length); } public static boolean matchingRow(final Cell left, final byte[] buf, final int offset, final int length) { + if (left instanceof BufferedCell) { + return ByteBufferUtils.equals(((BufferedCell) left).getRowBuffer(), + ((BufferedCell) left).getRowPositionInBuffer(), left.getRowLength(), buf, offset, length); + } return Bytes.equals(left.getRowArray(), left.getRowOffset(), left.getRowLength(), buf, offset, length); } public static boolean matchingFamily(final Cell left, final Cell right) { + if (left instanceof BufferedCell) { + BufferedCell b1 = (BufferedCell) left; + if (right instanceof BufferedCell) { + return ByteBufferUtils.equals(b1.getFamilyBuffer(), b1.getFamilyPositionInBuffer(), + b1.getFamilyLength(), ((BufferedCell) right).getFamilyBuffer(), + ((BufferedCell) right).getFamilyPositionInBuffer(), right.getFamilyLength()); + } else { + return ByteBufferUtils.equals(b1.getFamilyBuffer(), b1.getFamilyPositionInBuffer(), + b1.getFamilyLength(), right.getFamilyArray(), right.getFamilyOffset(), + right.getFamilyLength()); + } + } else if (right instanceof BufferedCell) { + return ByteBufferUtils.equals(((BufferedCell) right).getFamilyBuffer(), + ((BufferedCell) right).getFamilyPositionInBuffer(), right.getFamilyLength(), + left.getFamilyArray(), left.getFamilyOffset(), left.getFamilyLength()); + } return Bytes.equals(left.getFamilyArray(), left.getFamilyOffset(), left.getFamilyLength(), right.getFamilyArray(), right.getFamilyOffset(), right.getFamilyLength()); } public static boolean matchingFamily(final Cell left, final byte[] buf) { - return Bytes.equals(left.getFamilyArray(), left.getFamilyOffset(), left.getFamilyLength(), buf, - 0, buf.length); + return matchingFamily(left, buf, 0, buf.length); } public static boolean matchingFamily(final Cell left, final byte[] buf, final int offset, final int length) { + if (left instanceof BufferedCell) { + 
return ByteBufferUtils.equals(((BufferedCell) left).getFamilyBuffer(), + ((BufferedCell) left).getFamilyPositionInBuffer(), left.getFamilyLength(), buf, offset, + length); + } return Bytes.equals(left.getFamilyArray(), left.getFamilyOffset(), left.getFamilyLength(), buf, offset, length); } public static boolean matchingQualifier(final Cell left, final Cell right) { + if (left instanceof BufferedCell) { + BufferedCell b1 = (BufferedCell) left; + if (right instanceof BufferedCell) { + return ByteBufferUtils.equals(b1.getQualifierBuffer(), b1.getQualifierPositionInBuffer(), + b1.getQualifierLength(), ((BufferedCell) right).getQualifierBuffer(), + ((BufferedCell) right).getQualifierPositionInBuffer(), right.getQualifierLength()); + } else { + return ByteBufferUtils.equals(b1.getQualifierBuffer(), b1.getQualifierPositionInBuffer(), + b1.getQualifierLength(), right.getQualifierArray(), right.getQualifierOffset(), + right.getQualifierLength()); + } + } else if (right instanceof BufferedCell) { + return ByteBufferUtils.equals(((BufferedCell) right).getQualifierBuffer(), + ((BufferedCell) right).getQualifierPositionInBuffer(), right.getQualifierLength(), + left.getQualifierArray(), left.getQualifierOffset(), left.getQualifierLength()); + } return Bytes.equals(left.getQualifierArray(), left.getQualifierOffset(), left.getQualifierLength(), right.getQualifierArray(), right.getQualifierOffset(), right.getQualifierLength()); @@ -415,8 +496,7 @@ public final class CellUtil { if (buf == null) { return left.getQualifierLength() == 0; } - return Bytes.equals(left.getQualifierArray(), left.getQualifierOffset(), - left.getQualifierLength(), buf, 0, buf.length); + return matchingQualifier(left, buf, 0, buf.length); } public static boolean matchingQualifier(final Cell left, final byte[] buf, final int offset, @@ -424,6 +504,11 @@ public final class CellUtil { if (buf == null) { return left.getQualifierLength() == 0; } + if (left instanceof BufferedCell) { + return 
ByteBufferUtils.equals(((BufferedCell) left).getQualifierBuffer(), + ((BufferedCell) left).getQualifierPositionInBuffer(), left.getQualifierLength(), buf, + offset, length); + } return Bytes.equals(left.getQualifierArray(), left.getQualifierOffset(), left.getQualifierLength(), buf, offset, length); } @@ -448,11 +533,32 @@ public final class CellUtil { } public static boolean matchingValue(final Cell left, final Cell right) { + if (left instanceof BufferedCell) { + BufferedCell b1 = (BufferedCell) left; + if (right instanceof BufferedCell) { + return ByteBufferUtils.equals(b1.getValueBuffer(), b1.getValuePositionInBuffer(), + b1.getValueLength(), ((BufferedCell) right).getValueBuffer(), + ((BufferedCell) right).getValuePositionInBuffer(), right.getValueLength()); + } else { + return ByteBufferUtils.equals(b1.getValueBuffer(), b1.getValuePositionInBuffer(), + b1.getValueLength(), right.getValueArray(), right.getValueOffset(), + right.getValueLength()); + } + } else if (right instanceof BufferedCell) { + return ByteBufferUtils.equals(((BufferedCell) right).getValueBuffer(), + ((BufferedCell) right).getValuePositionInBuffer(), right.getValueLength(), + left.getValueArray(), left.getValueOffset(), left.getValueLength()); + } return Bytes.equals(left.getValueArray(), left.getValueOffset(), left.getValueLength(), right.getValueArray(), right.getValueOffset(), right.getValueLength()); } public static boolean matchingValue(final Cell left, final byte[] buf) { + if (left instanceof BufferedCell) { + return ByteBufferUtils.equals(((BufferedCell) left).getValueBuffer(), + ((BufferedCell) left).getValuePositionInBuffer(), left.getValueLength(), buf, 0, + buf.length); + } return Bytes.equals(left.getValueArray(), left.getValueOffset(), left.getValueLength(), buf, 0, buf.length); } @@ -902,4 +1008,101 @@ public final class CellUtil { return builder.toString(); } + + public static int getRowAsInt(Cell cell) { + if (cell instanceof BufferedCell) { + BufferedCell bCell = 
(BufferedCell) cell; + return ByteBufferUtils.getInt(bCell.getRowBuffer(), bCell.getRowPositionInBuffer()); + } + return Bytes.toInt(cell.getRowArray(), cell.getRowOffset()); + } + + public static long getValueAsLong(Cell cell){ + if (cell instanceof BufferedCell) { + BufferedCell bCell = (BufferedCell) cell; + return ByteBufferUtils.getLong(bCell.getValueBuffer(), bCell.getValuePositionInBuffer()); + } + return Bytes.toLong(cell.getValueArray(), cell.getValueOffset()); + } + + public static double getValueAsDouble(Cell cell){ + if (cell instanceof BufferedCell) { + // TODO add getDouble in BBU + } + return Bytes.toDouble(cell.getValueArray(), cell.getValueOffset()); + } + + public static BigDecimal getValueAsBigDecimal(Cell cell){ + if (cell instanceof BufferedCell) { + // TODO add getBigDecimal in BBU + } + return Bytes.toBigDecimal(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()). + setScale(2, RoundingMode.HALF_EVEN); + } + + public static Cell createFirstOnRow(Cell c) { + if (c instanceof BufferedCell) { + BufferedCell bc = (BufferedCell) c; + return new FakeKeyBufferBackedCell(bc.getRowBuffer(), bc.getRowPositionInBuffer(), + c.getRowLength(), HConstants.EMPTY_BB, 0, (byte) 0, HConstants.EMPTY_BB, 0, 0, + HConstants.LATEST_TIMESTAMP, Type.Maximum.getCode()); + } + return new FakeKeyArrayBackedCell(c.getRowArray(), c.getRowOffset(), c.getRowLength(), + HConstants.EMPTY_BYTE_ARRAY, 0, (byte) 0, HConstants.EMPTY_BYTE_ARRAY, 0, 0, + HConstants.LATEST_TIMESTAMP, Type.Maximum.getCode()); + } + + public static Cell createFirstOnRowColumn(Cell c, byte[] qual) { + int qLen = (qual == null) ? 0 : qual.length; + return createFirstOnRowColumn(c, qual, 0, qLen); + } + + public static Cell createFirstOnRowColumn(Cell c, byte[] qual, int qOff, int qLen) { + qual = (qual == null) ? 
HConstants.EMPTY_BYTE_ARRAY : qual; + if (c instanceof BufferedCell) { + BufferedCell bc = (BufferedCell) c; + return new FakeKeyBufferBackedCell(bc.getRowBuffer(), bc.getRowPositionInBuffer(), + c.getRowLength(), bc.getFamilyBuffer(), bc.getFamilyPositionInBuffer(), + c.getFamilyLength(), ByteBuffer.wrap(qual), qOff, qLen, HConstants.LATEST_TIMESTAMP, + Type.Maximum.getCode()); + } + return new FakeKeyArrayBackedCell(c.getRowArray(), c.getRowOffset(), c.getRowLength(), + c.getFamilyArray(), c.getFamilyOffset(), c.getFamilyLength(), qual, qOff, qLen, + HConstants.LATEST_TIMESTAMP, Type.Maximum.getCode()); + } + + public static Cell createLastOnRowColumn(Cell c) { + if (c instanceof BufferedCell) { + BufferedCell bc = (BufferedCell) c; + return new FakeKeyBufferBackedCell(bc.getRowBuffer(), bc.getRowPositionInBuffer(), + c.getRowLength(), bc.getFamilyBuffer(), bc.getFamilyPositionInBuffer(), + c.getFamilyLength(), bc.getQualifierBuffer(), bc.getQualifierPositionInBuffer(), + c.getQualifierLength(), HConstants.OLDEST_TIMESTAMP, Type.Minimum.getCode()); + } + return new FakeKeyArrayBackedCell(c.getRowArray(), c.getRowOffset(), c.getRowLength(), + c.getFamilyArray(), c.getFamilyOffset(), c.getFamilyLength(), c.getQualifierArray(), + c.getQualifierOffset(), c.getQualifierLength(), HConstants.OLDEST_TIMESTAMP, + Type.Minimum.getCode()); + } + + public static Cell createLastOnRow(Cell c) { + if (c instanceof BufferedCell) { + BufferedCell bc = (BufferedCell) c; + return new FakeKeyBufferBackedCell(bc.getRowBuffer(), bc.getRowPositionInBuffer(), + c.getRowLength(), HConstants.EMPTY_BB, 0, (byte) 0, HConstants.EMPTY_BB, 0, 0, + HConstants.OLDEST_TIMESTAMP, Type.Minimum.getCode()); + } + return new FakeKeyArrayBackedCell(c.getRowArray(), c.getRowOffset(), c.getRowLength(), + HConstants.EMPTY_BYTE_ARRAY, 0, (byte) 0, HConstants.EMPTY_BYTE_ARRAY, 0, 0, + HConstants.OLDEST_TIMESTAMP, Type.Minimum.getCode()); + } + + public static void writeValue(Cell c, int vlen, OutputStream 
os) throws IOException { + if (c instanceof BufferedCell) { + ByteBufferUtils.copyBufferToStream(os, ((BufferedCell) c).getValueBuffer(), + ((BufferedCell) c).getValuePositionInBuffer(), vlen); + } else { + os.write(c.getValueArray(), c.getValueOffset(), vlen); + } + } } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/FakeKeyArrayBackedCell.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/FakeKeyArrayBackedCell.java new file mode 100644 index 0000000..923d932 --- /dev/null +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/FakeKeyArrayBackedCell.java @@ -0,0 +1,173 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ +package org.apache.hadoop.hbase; + +import org.apache.hadoop.hbase.classification.InterfaceAudience; + +/** + * Creates fake keys used in scanners and these cells are byte[] backed + */ +@InterfaceAudience.Private +public class FakeKeyArrayBackedCell implements Cell { + private final byte[] row, fam, qual; + private final int rOff, fOff, qOff, qLen; + private final short rLen; + private final byte fLen; + private final byte type; + private final long ts; + + public FakeKeyArrayBackedCell(byte[] row, int rOff, short rLen, byte[] fam, int fOff, byte fLen, + byte[] qual, int qOff, int qLen, long ts, byte type) { + this.row = row; + this.rOff = rOff; + this.rLen = rLen; + this.fam = fam; + this.fOff = fOff; + this.fLen = fLen; + this.qual = qual; + this.qOff = qOff; + this.qLen = qLen; + this.ts = ts; + this.type = type; + } + + @Override + public byte[] getRowArray() { + return this.row; + } + + @Override + public int getRowOffset() { + return this.rOff; + } + + @Override + public short getRowLength() { + return this.rLen; + } + + @Override + public byte[] getFamilyArray() { + return this.fam; + } + + @Override + public int getFamilyOffset() { + return this.fOff; + } + + @Override + public byte getFamilyLength() { + return this.fLen; + } + + @Override + public byte[] getQualifierArray() { + return this.qual; + } + + @Override + public int getQualifierOffset() { + return this.qOff; + } + + @Override + public int getQualifierLength() { + return this.qLen; + } + + @Override + public long getTimestamp() { + return ts; + } + + @Override + public byte getTypeByte() { + return type; + } + + @Override + @Deprecated + public long getMvccVersion() { + return getSequenceId(); + } + + @Override + public long getSequenceId() { + return 0; + } + + @Override + public byte[] getValueArray() { + return HConstants.EMPTY_BYTE_ARRAY; + } + + @Override + public int getValueOffset() { + return 0; + } + + @Override + public int getValueLength() { + return 0; + } + + @Override + public 
byte[] getTagsArray() { + return HConstants.EMPTY_BYTE_ARRAY; + } + + @Override + public int getTagsOffset() { + return 0; + } + + @Override + public int getTagsLength() { + return 0; + } + + @Override + @Deprecated + public byte[] getValue() { + return HConstants.EMPTY_BYTE_ARRAY; + } + + @Override + @Deprecated + public byte[] getFamily() { + byte[] fam = new byte[fLen]; + System.arraycopy(this.fam, fOff, fam, 0, fLen); + return fam; + } + + @Override + @Deprecated + public byte[] getQualifier() { + byte[] qual = new byte[qLen]; + System.arraycopy(this.qual, qOff, qual, 0, qLen); + return qual; + } + + @Override + @Deprecated + public byte[] getRow() { + byte[] row = new byte[rLen]; + System.arraycopy(this.row, rOff, row, 0, rLen); + return row; + } +} diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/FakeKeyBufferBackedCell.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/FakeKeyBufferBackedCell.java new file mode 100644 index 0000000..55e2149 --- /dev/null +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/FakeKeyBufferBackedCell.java @@ -0,0 +1,227 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase; + +import java.nio.ByteBuffer; + +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.util.ByteBufferUtils; + +/** + * Creates fake keys used in scanners and these cells are buffer backed + */ +@InterfaceAudience.Private +public class FakeKeyBufferBackedCell implements BufferedCell { + private final ByteBuffer row, fam, qual; + private final int rOff, fOff, qOff, qLen; + private final short rLen; + private final byte fLen; + private final byte type; + private final long ts; + + public FakeKeyBufferBackedCell(ByteBuffer row, int rOff, short rLen, ByteBuffer fam, int fOff, + byte fLen, ByteBuffer qual, int qOff, int qLen, long ts, byte type) { + this.row = row; + this.rOff = rOff; + this.rLen = rLen; + this.fam = fam; + this.fOff = fOff; + this.fLen = fLen; + this.qual = qual; + this.qOff = qOff; + this.qLen = qLen; + this.ts = ts; + this.type = type; + } + + @Override + public byte[] getRowArray() { + byte[] rowBytes = new byte[rLen]; + ByteBufferUtils.copyFromBufferToArray(rowBytes, this.row, this.rOff, 0, rLen); + return rowBytes; + } + + @Override + public int getRowOffset() { + return 0; + } + + @Override + public short getRowLength() { + return this.rLen; + } + + @Override + public byte[] getFamilyArray() { + byte[] fBytes = new byte[fLen]; + ByteBufferUtils.copyFromBufferToArray(fBytes, this.fam, this.fOff, 0, fLen); + return fBytes; + } + + @Override + public int getFamilyOffset() { + return 0; + } + + @Override + public byte getFamilyLength() { + return this.fLen; + } + + @Override + public byte[] getQualifierArray() { + byte[] qBytes = new byte[qLen]; + ByteBufferUtils.copyFromBufferToArray(qBytes, this.qual, this.qOff, 0, qLen); + return qBytes; + } + + @Override + public int getQualifierOffset() { + return 0; + } + + @Override + public int getQualifierLength() { + return this.qLen; + } + + @Override + public long getTimestamp() { + return ts; + } + + @Override + public 
byte getTypeByte() { + return type; + } + + @Override + @Deprecated + public long getMvccVersion() { + return getSequenceId(); + } + + @Override + public long getSequenceId() { + return 0; + } + + @Override + public byte[] getValueArray() { + return HConstants.EMPTY_BYTE_ARRAY; + } + + @Override + public int getValueOffset() { + return 0; + } + + @Override + public int getValueLength() { + return 0; + } + + @Override + public byte[] getTagsArray() { + return HConstants.EMPTY_BYTE_ARRAY; + } + + @Override + public int getTagsOffset() { + return 0; + } + + @Override + public int getTagsLength() { + return 0; + } + + @Override + @Deprecated + public byte[] getValue() { + return getValueArray(); + } + + @Override + @Deprecated + public byte[] getFamily() { + return getFamilyArray(); + } + + @Override + @Deprecated + public byte[] getQualifier() { + return getQualifierArray(); + } + + @Override + @Deprecated + public byte[] getRow() { + return getRowArray(); + } + + @Override + public ByteBuffer getRowBuffer() { + return this.row; + } + + @Override + public int getRowPositionInBuffer() { + return this.rOff; + } + + @Override + public ByteBuffer getFamilyBuffer() { + return this.fam; + } + + @Override + public int getFamilyPositionInBuffer() { + return this.fOff; + } + + @Override + public ByteBuffer getQualifierBuffer() { + return this.qual; + } + + @Override + public int getQualifierPositionInBuffer() { + return this.qOff; + } + + @Override + public ByteBuffer getValueBuffer() { + return HConstants.EMPTY_BB; + } + + @Override + public int getValuePositionInBuffer() { + return 0; + } + + @Override + public ByteBuffer getTagsBuffer() { + return HConstants.EMPTY_BB; + } + + @Override + public int getTagsPositionInBuffer() { + return 0; + } +} \ No newline at end of file diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java index 19e251a..880b6b2 100644 --- 
a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hbase; import static org.apache.hadoop.hbase.io.hfile.BlockType.MAGIC_LENGTH; +import java.nio.ByteBuffer; import java.nio.charset.Charset; import java.util.Arrays; import java.util.Collections; @@ -490,6 +491,8 @@ public final class HConstants { */ public static final byte [] EMPTY_BYTE_ARRAY = new byte [0]; + public static final ByteBuffer EMPTY_BB = ByteBuffer.wrap(HConstants.EMPTY_BYTE_ARRAY); + /** * Used by scanners, etc when they want to start at the beginning of a region */ diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java index 7de1f54..2171acf 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java @@ -79,7 +79,7 @@ import com.google.common.annotations.VisibleForTesting; * and actual tag bytes length. */ @InterfaceAudience.Private -public class KeyValue implements Cell, HeapSize, Cloneable, SettableSequenceId, SettableTimestamp { +public class KeyValue implements Cell, HeapSize, Cloneable, SettableSequenceId, SettableTimestamp, OutputStreamable { private static final ArrayList EMPTY_ARRAY_LIST = new ArrayList(); static final Log LOG = LogFactory.getLog(KeyValue.class); @@ -1832,6 +1832,8 @@ public class KeyValue implements Cell, HeapSize, Cloneable, SettableSequenceId, * portion. This means two KeyValues with same Key but different Values are * considered the same as far as this Comparator is concerned. 
*/ + // Note: Not doing any changes in this Comparators as HBASE-10800 will replace all these with + // CellComparator and its children public static class KVComparator implements RawComparator, SamePrefixComparator { /** @@ -2417,6 +2419,7 @@ public class KeyValue implements Cell, HeapSize, Cloneable, SettableSequenceId, * @param newTags * @return a new KeyValue instance with new tags */ + // TODO purge it. Instead one has to use TagRewriteCell public static KeyValue cloneAndAddTags(Cell c, List newTags) { List existingTags = null; if(c.getTagsLength() > 0) { @@ -2822,4 +2825,9 @@ public class KeyValue implements Cell, HeapSize, Cloneable, SettableSequenceId, return super.equals(other); } } + + @Override + public long stream(OutputStream out, boolean withTags) throws IOException { + return oswrite(this, out, withTags); + } } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueUtil.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueUtil.java index 7cbfdd6..9da32f4 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueUtil.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueUtil.java @@ -544,8 +544,8 @@ public class KeyValueUtil { public static void oswrite(final Cell cell, final OutputStream out, final boolean withTags) throws IOException { - if (cell instanceof KeyValue) { - KeyValue.oswrite((KeyValue) cell, out, withTags); + if (cell instanceof OutputStreamable) { + ((OutputStreamable)cell).stream(out, withTags); } else { short rlen = cell.getRowLength(); byte flen = cell.getFamilyLength(); diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/OutputStreamable.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/OutputStreamable.java new file mode 100644 index 0000000..a8472bf --- /dev/null +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/OutputStreamable.java @@ -0,0 +1,29 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor 
license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase; + +import java.io.IOException; +import java.io.OutputStream; + +import org.apache.hadoop.hbase.classification.InterfaceAudience; + +@InterfaceAudience.Private +public interface OutputStreamable { + + long stream(OutputStream out, boolean withTags) throws IOException; +} diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/CellCodec.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/CellCodec.java index 9d03d89..764edda 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/CellCodec.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/CellCodec.java @@ -43,6 +43,7 @@ public class CellCodec implements Codec { @Override public void write(Cell cell) throws IOException { checkFlushed(); + // TODO BufferedCell check comes here. 
// Row write(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength()); // Column family diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/BufferedDataBlockEncoder.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/BufferedDataBlockEncoder.java index be8c192..e011654 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/BufferedDataBlockEncoder.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/BufferedDataBlockEncoder.java @@ -325,6 +325,7 @@ abstract class BufferedDataBlockEncoder implements DataBlockEncoder { // there. So this has to be an instance of SettableSequenceId. SeekerState need not be // SettableSequenceId as we never return that to top layers. When we have to, we make // ClonedSeekerState from it. + // TODO make it implements OutputStreamable protected static class ClonedSeekerState implements Cell, HeapSize, SettableSequenceId { private static final long FIXED_OVERHEAD = ClassSize.align(ClassSize.OBJECT + (4 * ClassSize.REFERENCE) + (2 * Bytes.SIZEOF_LONG) + (7 * Bytes.SIZEOF_INT) diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/CopyKeyDataBlockEncoder.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/CopyKeyDataBlockEncoder.java index 6b87c77..ebb20f3 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/CopyKeyDataBlockEncoder.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/CopyKeyDataBlockEncoder.java @@ -47,7 +47,7 @@ public class CopyKeyDataBlockEncoder extends BufferedDataBlockEncoder { out.writeInt(klength); out.writeInt(vlength); CellUtil.writeFlatKey(cell, out); - out.write(cell.getValueArray(), cell.getValueOffset(), vlength); + CellUtil.writeValue(cell, vlength, out); int size = klength + vlength + KeyValue.KEYVALUE_INFRASTRUCTURE_SIZE; // Write the additional tag into the stream if (encodingContext.getHFileContext().isIncludesTags()) { diff --git 
a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DiffKeyDeltaEncoder.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DiffKeyDeltaEncoder.java index 4182dc4..3040b07 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DiffKeyDeltaEncoder.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DiffKeyDeltaEncoder.java @@ -300,7 +300,7 @@ public class DiffKeyDeltaEncoder extends BufferedDataBlockEncoder { if ((flag & FLAG_SAME_TYPE) == 0) { out.write(cell.getTypeByte()); } - out.write(cell.getValueArray(), cell.getValueOffset(), vLength); + CellUtil.writeValue(cell, vLength, out); return kLength + vLength + KeyValue.KEYVALUE_INFRASTRUCTURE_SIZE; } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/FastDiffDeltaEncoder.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/FastDiffDeltaEncoder.java index a6f43d0..6f87a4c 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/FastDiffDeltaEncoder.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/FastDiffDeltaEncoder.java @@ -262,7 +262,7 @@ public class FastDiffDeltaEncoder extends BufferedDataBlockEncoder { ByteBufferUtils.putCompressedInt(out, 0); CellUtil.writeFlatKey(cell, out); // Write the value part - out.write(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()); + CellUtil.writeValue(cell, vLength, out); } else { int preKeyLength = KeyValueUtil.keyLength(prevCell); int preValLength = prevCell.getValueLength(); @@ -288,8 +288,7 @@ public class FastDiffDeltaEncoder extends BufferedDataBlockEncoder { // Check if current and previous values are the same. Compare value // length first as an optimization. 
if (vLength == preValLength - && Bytes.equals(cell.getValueArray(), cell.getValueOffset(), vLength, - prevCell.getValueArray(), prevCell.getValueOffset(), preValLength)) { + && CellUtil.matchingValue(cell, prevCell)) { flag |= FLAG_SAME_VALUE; } @@ -327,7 +326,7 @@ public class FastDiffDeltaEncoder extends BufferedDataBlockEncoder { // Write the value if it is not the same as before. if ((flag & FLAG_SAME_VALUE) == 0) { - out.write(cell.getValueArray(), cell.getValueOffset(), vLength); + CellUtil.writeValue(cell, vLength, out); } } return kLength + vLength + KeyValue.KEYVALUE_INFRASTRUCTURE_SIZE; diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/PrefixKeyDeltaEncoder.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/PrefixKeyDeltaEncoder.java index 0286eca..6bca166 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/PrefixKeyDeltaEncoder.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/PrefixKeyDeltaEncoder.java @@ -68,7 +68,7 @@ public class PrefixKeyDeltaEncoder extends BufferedDataBlockEncoder { writeKeyExcludingCommon(cell, common, out); } // Write the value part - out.write(cell.getValueArray(), cell.getValueOffset(), vlength); + CellUtil.writeValue(cell, vlength, out); int size = klength + vlength + KeyValue.KEYVALUE_INFRASTRUCTURE_SIZE; size += afterEncodingKeyValue(cell, out, encodingContext); state.prevCell = cell; diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteBufferUtils.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteBufferUtils.java index 6f348bc..aa34015 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteBufferUtils.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteBufferUtils.java @@ -162,9 +162,10 @@ public final class ByteBufferUtils { out.write(in.array(), in.arrayOffset() + offset, length); } else { - for (int i = 0; i < length; ++i) { - out.write(in.get(offset + 
i)); - } + // TODO check for any optimization + byte[] temp = new byte[length]; + copyFromBufferToArray(temp, in, offset, 0, length); + out.write(temp); } } @@ -497,6 +498,7 @@ public final class ByteBufferUtils { return Bytes.compareTo(buf1.array(), buf1.arrayOffset() + o1, len1, buf2.array(), buf2.arrayOffset() + o2, len2); } + // TODO add Unsafe based compare as in Bytes.java int end1 = o1 + len1; int end2 = o2 + len2; for (int i = o1, j = o2; i < end1 && j < end2; i++, j++) { @@ -508,4 +510,72 @@ public final class ByteBufferUtils { } return len1 - len2; } + + public static int compareTo(ByteBuffer buf1, int o1, int len1, byte[] buf2, int o2, int len2) { + // TODO add Unsafe based compare as in Bytes.java + int end1 = o1 + len1; + int end2 = o2 + len2; + for (int i = o1, j = o2; i < end1 && j < end2; i++, j++) { + int a = buf1.get(i) & 0xFF; + int b = buf2[j] & 0xFF; + if (a != b) { + return a - b; + } + } + return len1 - len2; + } + + public static boolean equals(final ByteBuffer left, int leftOffset, int leftLen, + final byte[] right, int rightOffset, int rightLen) { + // different lengths fast check + if (leftLen != rightLen) { + return false; + } + // Since we're often comparing adjacent sorted data, + // it's usual to have equal arrays except for the very last byte + // so check that first + // The getOffset() should not be used here + // TODO add getByte API here and use that instead of BB.get(offset). getByte() can make use of + // Unsafe API if unsafe available. + if (left.get(leftOffset + leftLen - 1) != right[rightOffset + rightLen - 1]) { + return false; + } + return compareTo(left, leftOffset, leftLen, right, rightOffset, rightLen) == 0; + } + + public static boolean equals(final ByteBuffer left, int leftOffset, int leftLen, + final ByteBuffer right, int rightOffset, int rightLen) { + if (leftLen != rightLen) { + return false; + } + // TODO add getByte API here and use that instead of BB.get(offset). 
getByte() can make use of + // Unsafe API if unsafe available. + if (left.get(leftOffset + leftLen - 1) != right.get(rightOffset + rightLen - 1)) { + return false; + } + return compareTo(left, leftOffset, leftLen, right, rightOffset, rightLen) == 0; + } + + public static int getInt(ByteBuffer buffer, int offset) { + // TODO add Unsafe read + return buffer.getInt(offset); + } + + public static long getLong(ByteBuffer buffer, int offset) { + // TODO add Unsafe read + return buffer.getLong(offset); + } + + public static int copyFromBufferToArray(byte[] out, ByteBuffer in, int sourceOffset, + int destinationOffset, int length) { + if (in.hasArray()) { + System.arraycopy(in.array(), sourceOffset + in.arrayOffset(), out, destinationOffset, length); + } else { + // TODO add Unsafe read + for (int i = 0; i < length; i++) { + out[destinationOffset + i] = in.get(sourceOffset + i); + } + } + return destinationOffset + length; + } } diff --git a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/PrefixTreeSeeker.java b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/PrefixTreeSeeker.java index b95055c..8186b74 100644 --- a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/PrefixTreeSeeker.java +++ b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/PrefixTreeSeeker.java @@ -268,6 +268,7 @@ public class PrefixTreeSeeker implements EncodedSeeker { * of the key part is deep copied * */ + // TODO make it implements OutputStreamable private static class ClonedPrefixTreeCell implements Cell, SettableSequenceId, HeapSize { private static final long FIXED_OVERHEAD = ClassSize.align(ClassSize.OBJECT + (5 * ClassSize.REFERENCE) + (2 * Bytes.SIZEOF_LONG) + (4 * Bytes.SIZEOF_INT) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRegionObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRegionObserver.java index 9e0cb9b..87850f5 100644 
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRegionObserver.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRegionObserver.java @@ -429,6 +429,7 @@ public abstract class BaseRegionObserver implements RegionObserver { } @Override + @Deprecated public boolean postScannerFilterRow(final ObserverContext e, final InternalScanner s, final byte[] currentRow, final int offset, final short length, final boolean hasMore) throws IOException { @@ -436,6 +437,14 @@ public abstract class BaseRegionObserver implements RegionObserver { } @Override + public boolean postScannerFilterRow(ObserverContext c, + InternalScanner s, Cell filteredRowFirstCell, boolean hasMore) throws IOException { + // fall back to old API for BC + return postScannerFilterRow(c, s, filteredRowFirstCell.getRowArray(), + filteredRowFirstCell.getRowOffset(), filteredRowFirstCell.getRowLength(), hasMore); + } + + @Override public void preScannerClose(final ObserverContext e, final InternalScanner s) throws IOException { } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java index 7ee5a99..31ed41a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java @@ -1078,11 +1078,17 @@ public interface RegionObserver extends Coprocessor { * @param hasMore the 'has more' indication * @return whether more rows are available for the scanner or not * @throws IOException + * @deprecated instead implement {@link #postScannerFilterRow(ObserverContext, InternalScanner, Cell, boolean)} */ + @Deprecated boolean postScannerFilterRow(final ObserverContext c, final InternalScanner s, final byte[] currentRow, final int offset, final short length, final boolean hasMore) throws IOException; + boolean postScannerFilterRow(final 
ObserverContext c, + final InternalScanner s, final Cell filteredRowFirstCell, final boolean hasMore) + throws IOException; + /** * Called before the client closes a scanner. *

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java index 0555363..2ca8ac3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java @@ -237,9 +237,9 @@ public class HFileWriterImpl implements HFile.Writer { } /** Checks the given value for validity. */ - protected void checkValue(final byte[] value, final int offset, - final int length) throws IOException { - if (value == null) { + protected void checkValue(final Cell cell) throws IOException { + // TODO we need this check? + if (cell.getValueArray() == null) { throw new IOException("Value cannot be null"); } } @@ -537,12 +537,9 @@ public class HFileWriterImpl implements HFile.Writer { */ @Override public void append(final Cell cell) throws IOException { - byte[] value = cell.getValueArray(); - int voffset = cell.getValueOffset(); - int vlength = cell.getValueLength(); // checkKey uses comparator to check we are writing in order. boolean dupKey = checkKey(cell); - checkValue(value, voffset, vlength); + checkValue(cell); if (!dupKey) { checkBlockBoundary(); } @@ -554,7 +551,7 @@ public class HFileWriterImpl implements HFile.Writer { fsBlockWriter.write(cell); totalKeyLength += CellUtil.estimatedSerializedSizeOfKey(cell); - totalValueLength += vlength; + totalValueLength += cell.getValueLength(); // Are we the first key in this block? 
if (firstCellInBlock == null) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/NoOpDataBlockEncoder.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/NoOpDataBlockEncoder.java index f75f6e9..3b422fd 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/NoOpDataBlockEncoder.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/NoOpDataBlockEncoder.java @@ -53,7 +53,7 @@ public class NoOpDataBlockEncoder implements HFileDataBlockEncoder { out.writeInt(klength); out.writeInt(vlength); CellUtil.writeFlatKey(cell, out); - out.write(cell.getValueArray(), cell.getValueOffset(), vlength); + CellUtil.writeValue(cell, vlength, out); int encodedKvSize = klength + vlength + KeyValue.KEYVALUE_INFRASTRUCTURE_SIZE; // Write the additional tag into the stream if (encodingCtx.getHFileContext().isIncludesTags()) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ColumnTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ColumnTracker.java index 8568cfc..138ae15 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ColumnTracker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ColumnTracker.java @@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.regionserver; import java.io.IOException; +import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.regionserver.ScanQueryMatcher.MatchCode; @@ -56,15 +57,13 @@ public interface ColumnTracker { * method based on the return type (INCLUDE) of this method. The values that can be returned by * this method are {@link MatchCode#INCLUDE}, {@link MatchCode#SEEK_NEXT_COL} and * {@link MatchCode#SEEK_NEXT_ROW}. - * @param bytes - * @param offset - * @param length + * @param cell * @param type The type of the KeyValue * @return The match code instance. 
* @throws IOException in case there is an internal consistency problem caused by a data * corruption. */ - ScanQueryMatcher.MatchCode checkColumn(byte[] bytes, int offset, int length, byte type) + ScanQueryMatcher.MatchCode checkColumn(Cell cell, byte type) throws IOException; /** @@ -84,7 +83,7 @@ public interface ColumnTracker { * @throws IOException in case there is an internal consistency problem caused by a data * corruption. */ - ScanQueryMatcher.MatchCode checkVersions(byte[] bytes, int offset, int length, long ttl, + ScanQueryMatcher.MatchCode checkVersions(Cell cell, long ttl, byte type, boolean ignoreCount) throws IOException; /** * Resets the Matcher @@ -113,9 +112,7 @@ public interface ColumnTracker { /** * Retrieve the MatchCode for the next row or column */ - MatchCode getNextRowOrNextColumn( - byte[] bytes, int offset, int qualLength - ); + MatchCode getNextRowOrNextColumn(Cell cell); /** * Give the tracker a chance to declare it's done based on only the timestamp diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ExplicitColumnTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ExplicitColumnTracker.java index b779e22..287f3f5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ExplicitColumnTracker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ExplicitColumnTracker.java @@ -22,6 +22,7 @@ import java.io.IOException; import java.util.NavigableSet; import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.regionserver.ScanQueryMatcher.MatchCode; @@ -105,8 +106,7 @@ public class ExplicitColumnTracker implements ColumnTracker { * {@inheritDoc} */ @Override - public ScanQueryMatcher.MatchCode checkColumn(byte [] bytes, int offset, - int length, byte type) { + public ScanQueryMatcher.MatchCode 
checkColumn(Cell cell, byte type) { // delete markers should never be passed to an // *Explicit*ColumnTracker assert !CellUtil.isDelete(type); @@ -122,8 +122,9 @@ public class ExplicitColumnTracker implements ColumnTracker { } // Compare specific column to current column + // TODO have an API CellComparator.compareQualifier(Cell, byte[], int, int) int ret = Bytes.compareTo(column.getBuffer(), column.getOffset(), - column.getLength(), bytes, offset, length); + column.getLength(), cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength()); // Column Matches. Return include code. The caller would call checkVersions // to limit the number of versions. @@ -156,7 +157,7 @@ public class ExplicitColumnTracker implements ColumnTracker { } @Override - public ScanQueryMatcher.MatchCode checkVersions(byte[] bytes, int offset, int length, + public ScanQueryMatcher.MatchCode checkVersions(Cell cell, long timestamp, byte type, boolean ignoreCount) throws IOException { assert !CellUtil.isDelete(type); if (ignoreCount) return ScanQueryMatcher.MatchCode.INCLUDE; @@ -215,14 +216,13 @@ public class ExplicitColumnTracker implements ColumnTracker { * this column. We may get this information from external filters or * timestamp range and we then need to indicate this information to * tracker. It is required only in case of ExplicitColumnTracker. 
- * @param bytes - * @param offset - * @param length + * @param cell */ - public void doneWithColumn(byte [] bytes, int offset, int length) { + public void doneWithColumn(Cell cell) { while (this.column != null) { - int compare = Bytes.compareTo(column.getBuffer(), column.getOffset(), - column.getLength(), bytes, offset, length); + // TODO have an API CellComparator.compareQualifier(Cell, byte[], int, int) + int compare = Bytes.compareTo(column.getBuffer(), column.getOffset(), column.getLength(), + cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength()); resetTS(); if (compare <= 0) { ++this.index; @@ -239,9 +239,9 @@ public class ExplicitColumnTracker implements ColumnTracker { } } - public MatchCode getNextRowOrNextColumn(byte[] bytes, int offset, - int qualLength) { - doneWithColumn(bytes, offset,qualLength); + @Override + public MatchCode getNextRowOrNextColumn(Cell cell) { + doneWithColumn(cell); if (getColumnHint() == null) { return MatchCode.SEEK_NEXT_ROW; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/GetClosestRowBeforeTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/GetClosestRowBeforeTracker.java index 4d22c0e..8594434 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/GetClosestRowBeforeTracker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/GetClosestRowBeforeTracker.java @@ -143,9 +143,7 @@ class GetClosestRowBeforeTracker { continue; } // Check column - int ret = Bytes.compareTo(kv.getQualifierArray(), kv.getQualifierOffset(), - kv.getQualifierLength(), - d.getQualifierArray(), d.getQualifierOffset(), d.getQualifierLength()); + int ret = CellComparator.compareQualifiers(kv, d); if (ret <= -1) { // This delete is for an earlier column. continue; @@ -238,6 +236,7 @@ class GetClosestRowBeforeTracker { if (!metaregion) return true; // Compare start of keys row. Compare including delimiter. 
Saves having // to calculate where tablename ends in the candidate kv. + // TODO use methods in CellComparator and pass Cells rather than byte[] to that. return Bytes.compareTo(this.targetkey.getRowArray(), this.rowoffset, this.tablenamePlusDelimiterLength, kv.getRowArray(), kv.getRowOffset(), this.tablenamePlusDelimiterLength) == 0; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java index 4a8e7cc..718fcdf 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java @@ -89,6 +89,7 @@ import org.apache.hadoop.hbase.NotServingRegionException; import org.apache.hadoop.hbase.RegionTooBusyException; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.Tag; +import org.apache.hadoop.hbase.TagRewriteCell; import org.apache.hadoop.hbase.TagType; import org.apache.hadoop.hbase.UnknownScannerException; import org.apache.hadoop.hbase.backup.HFileArchiver; @@ -3268,6 +3269,8 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi matches = true; } else if (result.size() == 1 && !valueIsNull) { Cell kv = result.get(0); + // TODO how to add a compareTo(BB, int, int)? + // If we can then we can make CellUtil.compareTo(Cell, ByteArrayComparable) int compareResult = comparator.compareTo(kv.getValueArray(), kv.getValueOffset(), kv.getValueLength()); switch (compareOp) { @@ -3348,6 +3351,8 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi matches = true; } else if (result.size() == 1 && !valueIsNull) { Cell kv = result.get(0); + // TODO how to add a compareTo(BB, int, int)? 
+ // If we can then we can make CellUtil.compareTo(Cell, ByteArrayComparable) int compareResult = comparator.compareTo(kv.getValueArray(), kv.getValueOffset(), kv.getValueLength()); switch (compareOp) { @@ -3480,12 +3485,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi // Rewrite the cell with the updated set of tags - cells.set(i, new KeyValue(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength(), - cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength(), - cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength(), - cell.getTimestamp(), KeyValue.Type.codeToType(cell.getTypeByte()), - cell.getValueArray(), cell.getValueOffset(), cell.getValueLength(), - newTags)); + cells.set(i, new TagRewriteCell(cell, Tag.fromList(newTags))); } } } @@ -5356,7 +5356,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi assert joinedContinuationRow != null; NextState state = populateResult(results, this.joinedHeap, limit, resultSize, - joinedContinuationRow.getRowArray(), joinedContinuationRow.getRowOffset(), + joinedContinuationRow, joinedContinuationRow.getRowOffset(), joinedContinuationRow.getRowLength()); if (state != null && !state.batchLimitReached() && !state.sizeLimitReached()) { // We are done with this row, reset the continuation. @@ -5375,13 +5375,13 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi * @param remainingResultSize The remaining space within our result size limit. A negative value * indicate no limit * @param batchLimit Max amount of KVs to place in result list, -1 means no limit. - * @param currentRow Byte array with key we are fetching. 
+ * @param currentRowCell * @param offset offset for currentRow * @param length length for currentRow * @return state of last call to {@link KeyValueHeap#next()} */ private NextState populateResult(List results, KeyValueHeap heap, int batchLimit, - long remainingResultSize, byte[] currentRow, int offset, short length) throws IOException { + long remainingResultSize, Cell currentRowCell, int offset, short length) throws IOException { Cell nextKv; boolean moreCellsInRow = false; long accumulatedResultSize = 0; @@ -5399,7 +5399,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi } nextKv = heap.peek(); - moreCellsInRow = moreCellsInRow(nextKv, currentRow, offset, length); + moreCellsInRow = moreCellsInRow(nextKv, currentRowCell, offset, length); boolean sizeLimitReached = remainingResultSize > 0 && accumulatedResultSize >= remainingResultSize; if (moreCellsInRow && sizeLimitReached) { @@ -5424,9 +5424,10 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi * @param length * @return true When there are more cells in the row to be read */ - private boolean moreCellsInRow(final Cell nextKv, byte[] currentRow, int offset, + private boolean moreCellsInRow(final Cell nextKv, Cell currentRowCell, int offset, short length) { - return nextKv != null && CellUtil.matchingRow(nextKv, currentRow, offset, length); + // TODO use offset and length also and avoid getting it again + return nextKv != null && CellUtil.matchingRow(nextKv, currentRowCell); } /** @@ -5496,16 +5497,14 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi // Let's see what we have in the storeHeap. 
Cell current = this.storeHeap.peek(); - byte[] currentRow = null; int offset = 0; short length = 0; if (current != null) { - currentRow = current.getRowArray(); offset = current.getRowOffset(); length = current.getRowLength(); } - boolean stopRow = isStopRow(currentRow, offset, length); + boolean stopRow = isStopRow(current, offset, length); boolean hasFilterRow = this.filter != null && this.filter.hasFilterRow(); // If filter#hasFilterRow is true, partial results are not allowed since allowing them @@ -5534,15 +5533,15 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi // Check if rowkey filter wants to exclude this row. If so, loop to next. // Technically, if we hit limits before on this row, we don't need this call. - if (filterRowKey(currentRow, offset, length)) { - boolean moreRows = nextRow(currentRow, offset, length); + if (filterRowKey(current, offset, length)) { + boolean moreRows = nextRow(current, offset, length); if (!moreRows) return NextState.makeState(NextState.State.NO_MORE_VALUES, resultSize); results.clear(); continue; } NextState storeHeapState = - populateResult(results, this.storeHeap, batchLimit, remainingResultSize, currentRow, + populateResult(results, this.storeHeap, batchLimit, remainingResultSize, current, offset, length); resultSize += calculateResultSize(results, storeHeapState); // Invalid states should never be returned. If one is seen, throw exception @@ -5573,7 +5572,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi } Cell nextKv = this.storeHeap.peek(); stopRow = nextKv == null || - isStopRow(nextKv.getRowArray(), nextKv.getRowOffset(), nextKv.getRowLength()); + isStopRow(nextKv, nextKv.getRowOffset(), nextKv.getRowLength()); // save that the row was empty before filters applied to it. 
final boolean isEmptyRow = results.isEmpty(); @@ -5586,7 +5585,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi if ((isEmptyRow || ret == FilterWrapper.FilterRowRetCode.EXCLUDE) || filterRow()) { results.clear(); - boolean moreRows = nextRow(currentRow, offset, length); + boolean moreRows = nextRow(current, offset, length); if (!moreRows) return NextState.makeState(NextState.State.NO_MORE_VALUES, 0); // This row was totally filtered out, if this is NOT the last row, @@ -5603,11 +5602,11 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi Cell nextJoinedKv = joinedHeap.peek(); // If joinedHeap is pointing to some other row, try to seek to a correct one. boolean mayHaveData = (nextJoinedKv != null && CellUtil.matchingRow(nextJoinedKv, - currentRow, offset, length)) + current)) || (this.joinedHeap.requestSeek( - KeyValueUtil.createFirstOnRow(currentRow, offset, length), true, true) + CellUtil.createFirstOnRow(current), true, true) && joinedHeap.peek() != null && CellUtil.matchingRow(joinedHeap.peek(), - currentRow, offset, length)); + current)); if (mayHaveData) { joinedContinuationRow = current; joinedHeapState = @@ -5641,7 +5640,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi // Double check to prevent empty rows from appearing in result. It could be // the case when SingleColumnValueExcludeFilter is used. 
if (results.isEmpty()) { - boolean moreRows = nextRow(currentRow, offset, length); + boolean moreRows = nextRow(current, offset, length); if (!moreRows) return NextState.makeState(NextState.State.NO_MORE_VALUES, 0); if (!stopRow) continue; } @@ -5669,30 +5668,32 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi && filter.filterRow(); } - private boolean filterRowKey(byte[] row, int offset, short length) throws IOException { + private boolean filterRowKey(Cell currentRowCell, int offset, short length) throws IOException { return filter != null - && filter.filterRowKey(row, offset, length); + && filter.filterRowKey(currentRowCell); } - protected boolean nextRow(byte [] currentRow, int offset, short length) throws IOException { + protected boolean nextRow(Cell currentRowCell, int offset, short length) throws IOException { assert this.joinedContinuationRow == null: "Trying to go to next row during joinedHeap read."; Cell next; while ((next = this.storeHeap.peek()) != null && - CellUtil.matchingRow(next, currentRow, offset, length)) { + CellUtil.matchingRow(next, currentRowCell)) { this.storeHeap.next(MOCKED_LIST); } resetFilters(); // Calling the hook in CP which allows it to do a fast forward return this.region.getCoprocessorHost() == null || this.region.getCoprocessorHost() - .postScannerFilterRow(this, currentRow, offset, length); + .postScannerFilterRow(this, currentRowCell); } - protected boolean isStopRow(byte[] currentRow, int offset, short length) { - return currentRow == null || + protected boolean isStopRow(Cell currentRowCell, int offset, short length) { + // TODO Comparators should allow to pass Cell rather than byte[]. HBASE-10800 will handle it. + // Then below call to getRowArray() can be avoided. 
+ return currentRowCell == null || (stopRow != null && comparator.compareRows(stopRow, 0, stopRow.length, - currentRow, offset, length) <= isScan); + currentRowCell.getRowArray(), offset, length) <= isScan); } @Override @@ -6729,9 +6730,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi newCell.getQualifierArray(), newCell.getQualifierOffset(), cell.getQualifierLength()); // copy in the value - System.arraycopy(oldCell.getValueArray(), oldCell.getValueOffset(), - newCell.getValueArray(), newCell.getValueOffset(), - oldCell.getValueLength()); + CellUtil.copyValueTo(oldCell, newCell.getValueArray(), newCell.getValueOffset()); System.arraycopy(cell.getValueArray(), cell.getValueOffset(), newCell.getValueArray(), newCell.getValueOffset() + oldCell.getValueLength(), @@ -6750,15 +6749,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi List newTags = new ArrayList(1); newTags.add(new Tag(TagType.TTL_TAG_TYPE, Bytes.toBytes(append.getTTL()))); // Add the new TTL tag - newCell = new KeyValue(cell.getRowArray(), cell.getRowOffset(), - cell.getRowLength(), - cell.getFamilyArray(), cell.getFamilyOffset(), - cell.getFamilyLength(), - cell.getQualifierArray(), cell.getQualifierOffset(), - cell.getQualifierLength(), - cell.getTimestamp(), KeyValue.Type.codeToType(cell.getTypeByte()), - cell.getValueArray(), cell.getValueOffset(), cell.getValueLength(), - newTags); + newCell = new TagRewriteCell(cell, Tag.fromList(newTags)); } else { newCell = cell; } @@ -6960,7 +6951,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi c = results.get(idx); ts = Math.max(now, c.getTimestamp()); if(c.getValueLength() == Bytes.SIZEOF_LONG) { - amount += Bytes.toLong(c.getValueArray(), c.getValueOffset(), Bytes.SIZEOF_LONG); + amount += CellUtil.getValueAsLong(c); } else { // throw DoNotRetryIOException instead of IllegalArgumentException throw new org.apache.hadoop.hbase.DoNotRetryIOException( 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java index 6e23952..124ad8b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java @@ -1369,14 +1369,14 @@ public class RegionCoprocessorHost * @return whether more rows are available for the scanner or not * @throws IOException */ - public boolean postScannerFilterRow(final InternalScanner s, final byte[] currentRow, - final int offset, final short length) throws IOException { + public boolean postScannerFilterRow(final InternalScanner s, final Cell currentRowCell) + throws IOException { return execOperationWithResult(true, coprocessors.isEmpty() ? null : new RegionOperationWithResult() { @Override public void call(RegionObserver oserver, ObserverContext ctx) throws IOException { - setResult(oserver.postScannerFilterRow(ctx, s, currentRow, offset,length, getResult())); + setResult(oserver.postScannerFilterRow(ctx, s, currentRowCell, getResult())); } }); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReversedKeyValueHeap.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReversedKeyValueHeap.java index c7ce180..6bf0ed0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReversedKeyValueHeap.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReversedKeyValueHeap.java @@ -77,9 +77,7 @@ public class ReversedKeyValueHeap extends KeyValueHeap { KeyValueScanner scanner; while ((scanner = heap.poll()) != null) { Cell topKey = scanner.peek(); - if (comparator.getComparator().compareRows(topKey.getRowArray(), - topKey.getRowOffset(), topKey.getRowLength(), seekKey.getRowArray(), - seekKey.getRowOffset(), seekKey.getRowLength()) < 0) { + if 
(comparator.getComparator().compareRows(topKey, seekKey) < 0) { // Row of Top KeyValue is before Seek row. heap.add(scanner); current = pollRealKV(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReversedRegionScannerImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReversedRegionScannerImpl.java index feda699..8a43bad 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReversedRegionScannerImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReversedRegionScannerImpl.java @@ -22,6 +22,8 @@ import java.io.IOException; import java.util.List; import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.regionserver.HRegion.RegionScannerImpl; @@ -56,24 +58,24 @@ class ReversedRegionScannerImpl extends RegionScannerImpl { } @Override - protected boolean isStopRow(byte[] currentRow, int offset, short length) { - return currentRow == null + protected boolean isStopRow(Cell currentRowCell, int offset, short length) { + // TODO avoid passing getRowArray to Comparator. HBASE-10800 will do this. 
+ return currentRowCell == null || (super.stopRow != null && region.getComparator().compareRows( - stopRow, 0, stopRow.length, currentRow, offset, length) >= super.isScan); + stopRow, 0, stopRow.length, currentRowCell.getRowArray(), offset, length) >= super.isScan); } @Override - protected boolean nextRow(byte[] currentRow, int offset, short length) + protected boolean nextRow(Cell currentRowCell, int offset, short length) throws IOException { assert super.joinedContinuationRow == null : "Trying to go to next row during joinedHeap read."; byte row[] = new byte[length]; - System.arraycopy(currentRow, offset, row, 0, length); + CellUtil.copyRowTo(currentRowCell, row, 0); this.storeHeap.seekToPreviousRow(KeyValueUtil.createFirstOnRow(row)); resetFilters(); // Calling the hook in CP which allows it to do a fast forward if (this.region.getCoprocessorHost() != null) { - return this.region.getCoprocessorHost().postScannerFilterRow(this, - currentRow, offset, length); + return this.region.getCoprocessorHost().postScannerFilterRow(this, currentRowCell); } return true; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanDeleteTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanDeleteTracker.java index a5c17fb..8961b66 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanDeleteTracker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanDeleteTracker.java @@ -24,6 +24,8 @@ import java.util.TreeSet; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellComparator; +import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.util.Bytes; @@ -48,9 +50,7 @@ public class ScanDeleteTracker implements DeleteTracker { protected boolean hasFamilyStamp = false; protected long familyStamp = 0L; protected SortedSet familyVersionStamps = new 
TreeSet(); - protected byte [] deleteBuffer = null; - protected int deleteOffset = 0; - protected int deleteLength = 0; + protected Cell deleteCell = null; protected byte deleteType = 0; protected long deleteTimestamp = 0L; @@ -71,8 +71,6 @@ public class ScanDeleteTracker implements DeleteTracker { @Override public void add(Cell cell) { long timestamp = cell.getTimestamp(); - int qualifierOffset = cell.getQualifierOffset(); - int qualifierLength = cell.getQualifierLength(); byte type = cell.getTypeByte(); if (!hasFamilyStamp || timestamp > familyStamp) { if (type == KeyValue.Type.DeleteFamily.getCode()) { @@ -84,17 +82,14 @@ public class ScanDeleteTracker implements DeleteTracker { return; } - if (deleteBuffer != null && type < deleteType) { + if (deleteCell != null && type < deleteType) { // same column, so ignore less specific delete - if (Bytes.equals(deleteBuffer, deleteOffset, deleteLength, - cell.getQualifierArray(), qualifierOffset, qualifierLength)){ + if (CellUtil.matchingQualifier(deleteCell, cell)){ return; } } // new column, or more general delete type - deleteBuffer = cell.getQualifierArray(); - deleteOffset = qualifierOffset; - deleteLength = qualifierLength; + deleteCell = cell; deleteType = type; deleteTimestamp = timestamp; } @@ -121,9 +116,8 @@ public class ScanDeleteTracker implements DeleteTracker { return DeleteResult.FAMILY_VERSION_DELETED; } - if (deleteBuffer != null) { - int ret = Bytes.compareTo(deleteBuffer, deleteOffset, deleteLength, - cell.getQualifierArray(), qualifierOffset, qualifierLength); + if (deleteCell != null) { + int ret = CellComparator.compareQualifiers(deleteCell, cell); if (ret == 0) { if (deleteType == KeyValue.Type.DeleteColumn.getCode()) { @@ -138,13 +132,13 @@ public class ScanDeleteTracker implements DeleteTracker { assert timestamp < deleteTimestamp; // different timestamp, let's clear the buffer. - deleteBuffer = null; + deleteCell = null; } else if(ret < 0){ // Next column case. 
- deleteBuffer = null; + deleteCell = null; } else { throw new IllegalStateException("isDelete failed: deleteBuffer=" - + Bytes.toStringBinary(deleteBuffer, deleteOffset, deleteLength) + + Bytes.toStringBinary(deleteCell.getQualifierArray(), deleteCell.getQualifierOffset(), deleteCell.getQualifierLength()) + ", qualifier=" + Bytes.toStringBinary(cell.getQualifierArray(), qualifierOffset, qualifierLength) + ", timestamp=" + timestamp + ", comparison result: " + ret); @@ -156,7 +150,7 @@ public class ScanDeleteTracker implements DeleteTracker { @Override public boolean isEmpty() { - return deleteBuffer == null && !hasFamilyStamp && + return deleteCell == null && !hasFamilyStamp && familyVersionStamps.isEmpty(); } @@ -166,7 +160,7 @@ public class ScanDeleteTracker implements DeleteTracker { hasFamilyStamp = false; familyStamp = 0L; familyVersionStamps.clear(); - deleteBuffer = null; + deleteCell = null; } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanQueryMatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanQueryMatcher.java index 032b4ce..7387cd4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanQueryMatcher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanQueryMatcher.java @@ -90,9 +90,7 @@ public class ScanQueryMatcher { /* row is not private for tests */ /** Row the query is on */ - byte [] row; - int rowOffset; - short rowLength; + Cell rowCell; /** * Oldest put in any of the involved store files @@ -278,8 +276,7 @@ public class ScanQueryMatcher { if (filter != null && filter.filterAllRemaining()) { return MatchCode.DONE_SCAN; } - int ret = this.rowComparator.compareRows(row, this.rowOffset, this.rowLength, - cell.getRowArray(), cell.getRowOffset(), cell.getRowLength()); + int ret = this.rowComparator.compareRows(rowCell, cell); if (!this.isReversed) { if (ret <= -1) { return MatchCode.DONE; @@ -306,14 +303,10 @@ public class 
ScanQueryMatcher { return MatchCode.SEEK_NEXT_ROW; } - int qualifierOffset = cell.getQualifierOffset(); - int qualifierLength = cell.getQualifierLength(); - long timestamp = cell.getTimestamp(); // check for early out based on timestamp alone if (columns.isDone(timestamp)) { - return columns.getNextRowOrNextColumn(cell.getQualifierArray(), qualifierOffset, - qualifierLength); + return columns.getNextRowOrNextColumn(cell); } // check if the cell is expired by cell TTL if (HStore.isCellTTLExpired(cell, this.oldestUnexpiredTS, this.now)) { @@ -372,8 +365,7 @@ public class ScanQueryMatcher { if (timestamp < earliestPutTs) { // keeping delete rows, but there are no puts older than // this delete in the store files. - return columns.getNextRowOrNextColumn(cell.getQualifierArray(), - qualifierOffset, qualifierLength); + return columns.getNextRowOrNextColumn(cell); } // else: fall through and do version counting on the // delete markers @@ -387,8 +379,7 @@ public class ScanQueryMatcher { switch (deleteResult) { case FAMILY_DELETED: case COLUMN_DELETED: - return columns.getNextRowOrNextColumn(cell.getQualifierArray(), - qualifierOffset, qualifierLength); + return columns.getNextRowOrNextColumn(cell); case VERSION_DELETED: case FAMILY_VERSION_DELETED: return MatchCode.SKIP; @@ -403,13 +394,11 @@ public class ScanQueryMatcher { if (timestampComparison >= 1) { return MatchCode.SKIP; } else if (timestampComparison <= -1) { - return columns.getNextRowOrNextColumn(cell.getQualifierArray(), qualifierOffset, - qualifierLength); + return columns.getNextRowOrNextColumn(cell); } // STEP 1: Check if the column is part of the requested columns - MatchCode colChecker = columns.checkColumn(cell.getQualifierArray(), - qualifierOffset, qualifierLength, typeByte); + MatchCode colChecker = columns.checkColumn(cell, typeByte); if (colChecker == MatchCode.INCLUDE) { ReturnCode filterResponse = ReturnCode.SKIP; // STEP 2: Yes, the column is part of the requested columns. 
Check if filter is present @@ -420,8 +409,7 @@ public class ScanQueryMatcher { case SKIP: return MatchCode.SKIP; case NEXT_COL: - return columns.getNextRowOrNextColumn(cell.getQualifierArray(), - qualifierOffset, qualifierLength); + return columns.getNextRowOrNextColumn(cell); case NEXT_ROW: stickyNextRow = true; return MatchCode.SEEK_NEXT_ROW; @@ -451,10 +439,8 @@ public class ScanQueryMatcher { * In all the above scenarios, we return the column checker return value except for * FilterResponse (INCLUDE_AND_SEEK_NEXT_COL) and ColumnChecker(INCLUDE) */ - colChecker = - columns.checkVersions(cell.getQualifierArray(), qualifierOffset, - qualifierLength, timestamp, typeByte, - mvccVersion > maxReadPointToTrackVersions); + colChecker = columns.checkVersions(cell, timestamp, typeByte, + mvccVersion > maxReadPointToTrackVersions); //Optimize with stickyNextRow stickyNextRow = colChecker == MatchCode.INCLUDE_AND_SEEK_NEXT_ROW ? true : stickyNextRow; return (filterResponse == ReturnCode.INCLUDE_AND_NEXT_COL && @@ -494,6 +480,7 @@ public class ScanQueryMatcher { } public boolean moreRowsMayExistAfter(Cell kv) { + // TODO pass kv itself to Comparator instead of array. HBASE-10800 itself will do these changes. 
if (this.isReversed) { if (rowComparator.compareRows(kv.getRowArray(), kv.getRowOffset(), kv.getRowLength(), stopRow, 0, stopRow.length) <= 0) { @@ -513,16 +500,14 @@ public class ScanQueryMatcher { } } - /** - * Set current row - * @param row - */ - public void setRow(byte [] row, int offset, short length) { - checkPartialDropDeleteRange(row, offset, length); - this.row = row; - this.rowOffset = offset; - this.rowLength = length; - reset(); + // Only used by tests + void setRow(byte[] row, int offset, short length) { + KeyValue createFirstOnRow = KeyValueUtil.createFirstOnRow(row, offset, length); + setRowCell(createFirstOnRow); + } + + public void setRowCell(Cell rowCell) { + this.rowCell = rowCell; } public void reset() { @@ -559,23 +544,15 @@ public class ScanQueryMatcher { public Cell getKeyForNextColumn(Cell kv) { ColumnCount nextColumn = columns.getColumnHint(); if (nextColumn == null) { - return KeyValueUtil.createLastOnRow( - kv.getRowArray(), kv.getRowOffset(), kv.getRowLength(), - kv.getFamilyArray(), kv.getFamilyOffset(), kv.getFamilyLength(), - kv.getQualifierArray(), kv.getQualifierOffset(), kv.getQualifierLength()); + return CellUtil.createLastOnRowColumn(kv); } else { - return KeyValueUtil.createFirstOnRow( - kv.getRowArray(), kv.getRowOffset(), kv.getRowLength(), - kv.getFamilyArray(), kv.getFamilyOffset(), kv.getFamilyLength(), - nextColumn.getBuffer(), nextColumn.getOffset(), nextColumn.getLength()); + return CellUtil.createFirstOnRowColumn(kv, nextColumn.getBuffer(), nextColumn.getOffset(), + nextColumn.getLength()); } } public Cell getKeyForNextRow(Cell kv) { - return KeyValueUtil.createLastOnRow( - kv.getRowArray(), kv.getRowOffset(), kv.getRowLength(), - null, 0, 0, - null, 0, 0); + return CellUtil.createLastOnRow(kv); } /** @@ -584,6 +561,9 @@ public class ScanQueryMatcher { * @return result of the compare between the indexed key and the key portion of the passed cell */ public int compareKeyForNextRow(Cell nextIndexed, Cell kv) { + // TODO 
instead of passing byte[] of row, we have to pass Cell 'kv' itself to + // Comparator. When we change to CellComparator as part of HBASE-10800 we will have this. Patch + // already in progress in this direction return rowComparator.compareKey(nextIndexed, kv.getRowArray(), kv.getRowOffset(), kv.getRowLength(), null, 0, 0, @@ -598,6 +578,9 @@ public class ScanQueryMatcher { */ public int compareKeyForNextColumn(Cell nextIndexed, Cell kv) { ColumnCount nextColumn = columns.getColumnHint(); + // TODO instead of passing byte[] of row, fam parts etc, we have to pass Cell 'kv' itself to + // Comparator. When we change to CellComparator as part of HBASE-10800 we will have this. Patch + // already in progress in this direction if (nextColumn == null) { return rowComparator.compareKey(nextIndexed, kv.getRowArray(), kv.getRowOffset(), kv.getRowLength(), @@ -614,11 +597,11 @@ public class ScanQueryMatcher { } //Used only for testing purposes - static MatchCode checkColumn(ColumnTracker columnTracker, byte[] bytes, int offset, - int length, long ttl, byte type, boolean ignoreCount) throws IOException { - MatchCode matchCode = columnTracker.checkColumn(bytes, offset, length, type); + static MatchCode checkColumn(ColumnTracker columnTracker, Cell cell, long ttl, byte type, + boolean ignoreCount) throws IOException { + MatchCode matchCode = columnTracker.checkColumn(cell, type); if (matchCode == MatchCode.INCLUDE) { - return columnTracker.checkVersions(bytes, offset, length, ttl, type, ignoreCount); + return columnTracker.checkVersions(cell, ttl, type, ignoreCount); } return matchCode; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanWildcardColumnTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanWildcardColumnTracker.java index 85b36fb..f803b6a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanWildcardColumnTracker.java +++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanWildcardColumnTracker.java @@ -22,6 +22,8 @@ package org.apache.hadoop.hbase.regionserver; import java.io.IOException; import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.regionserver.ScanQueryMatcher.MatchCode; @@ -32,9 +34,7 @@ import org.apache.hadoop.hbase.util.Bytes; */ @InterfaceAudience.Private public class ScanWildcardColumnTracker implements ColumnTracker { - private byte [] columnBuffer = null; - private int columnOffset = 0; - private int columnLength = 0; + private Cell column = null; private int currentCount = 0; private int maxVersions; private int minVersions; @@ -64,7 +64,7 @@ public class ScanWildcardColumnTracker implements ColumnTracker { * This receives puts *and* deletes. */ @Override - public MatchCode checkColumn(byte[] bytes, int offset, int length, byte type) + public MatchCode checkColumn(Cell cell, byte type) throws IOException { return MatchCode.INCLUDE; } @@ -75,18 +75,17 @@ public class ScanWildcardColumnTracker implements ColumnTracker { * take the version of the previous put (so eventually all but the last can be reclaimed). */ @Override - public ScanQueryMatcher.MatchCode checkVersions(byte[] bytes, int offset, int length, + public ScanQueryMatcher.MatchCode checkVersions(Cell cell, long timestamp, byte type, boolean ignoreCount) throws IOException { - if (columnBuffer == null) { + if (column == null) { // first iteration. 
- resetBuffer(bytes, offset, length); + resetBuffer(cell); if (ignoreCount) return ScanQueryMatcher.MatchCode.INCLUDE; // do not count a delete marker as another version return checkVersion(type, timestamp); } - int cmp = Bytes.compareTo(bytes, offset, length, - columnBuffer, columnOffset, columnLength); + int cmp = CellComparator.compareQualifiers(cell, column); if (cmp == 0) { if (ignoreCount) return ScanQueryMatcher.MatchCode.INCLUDE; @@ -102,7 +101,7 @@ public class ScanWildcardColumnTracker implements ColumnTracker { // new col > old col if (cmp > 0) { // switched columns, lets do something.x - resetBuffer(bytes, offset, length); + resetBuffer(cell); if (ignoreCount) return ScanQueryMatcher.MatchCode.INCLUDE; return checkVersion(type, timestamp); } @@ -114,13 +113,11 @@ public class ScanWildcardColumnTracker implements ColumnTracker { throw new IOException( "ScanWildcardColumnTracker.checkColumn ran into a column actually " + "smaller than the previous column: " + - Bytes.toStringBinary(bytes, offset, length)); + Bytes.toStringBinary(cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength())); } - private void resetBuffer(byte[] bytes, int offset, int length) { - columnBuffer = bytes; - columnOffset = offset; - columnLength = length; + private void resetBuffer(Cell c) { + column = c; currentCount = 0; } @@ -152,7 +149,7 @@ public class ScanWildcardColumnTracker implements ColumnTracker { @Override public void reset() { - columnBuffer = null; + column = null; resetTSAndType(); } @@ -194,8 +191,8 @@ public class ScanWildcardColumnTracker implements ColumnTracker { return false; } - public MatchCode getNextRowOrNextColumn(byte[] bytes, int offset, - int qualLength) { + @Override + public MatchCode getNextRowOrNextColumn(Cell cell) { return MatchCode.SEEK_NEXT_COL; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java index a8ee091..ff43faf 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java @@ -215,9 +215,7 @@ public class StoreFileScanner implements KeyValueScanner { hfs.next(); setCurrentCell(hfs.getKeyValue()); if (this.stopSkippingKVsIfNextRow - && getComparator().compareRows(cur.getRowArray(), cur.getRowOffset(), - cur.getRowLength(), startKV.getRowArray(), startKV.getRowOffset(), - startKV.getRowLength()) > 0) { + && getComparator().compareRows(cur, startKV) > 0) { return false; } } @@ -313,6 +311,7 @@ public class StoreFileScanner implements KeyValueScanner { if (useBloom) { // check ROWCOL Bloom filter first. if (reader.getBloomFilterType() == BloomType.ROWCOL) { + // TODO check whether this kv can be BufferedCell or not haveToSeek = reader.passesGeneralBloomFilter(kv.getRowArray(), kv.getRowOffset(), kv.getRowLength(), kv.getQualifierArray(), kv.getQualifierOffset(), kv.getQualifierLength()); @@ -419,16 +418,13 @@ public class StoreFileScanner implements KeyValueScanner { public boolean seekToPreviousRow(Cell key) throws IOException { try { try { - KeyValue seekKey = KeyValueUtil.createFirstOnRow(key.getRowArray(), key.getRowOffset(), - key.getRowLength()); + Cell seekKey = CellUtil.createFirstOnRow(key); if (seekCount != null) seekCount.incrementAndGet(); - if (!hfs.seekBefore(seekKey.getBuffer(), seekKey.getKeyOffset(), - seekKey.getKeyLength())) { + if (!hfs.seekBefore(seekKey)) { close(); return false; } - KeyValue firstKeyOfPreviousRow = KeyValueUtil.createFirstOnRow(hfs.getKeyValue() - .getRowArray(), hfs.getKeyValue().getRowOffset(), hfs.getKeyValue().getRowLength()); + Cell firstKeyOfPreviousRow = CellUtil.createFirstOnRow(hfs.getKeyValue()); if (seekCount != null) seekCount.incrementAndGet(); if (!seekAtOrAfter(hfs, 
firstKeyOfPreviousRow)) { @@ -477,9 +473,7 @@ public class StoreFileScanner implements KeyValueScanner { public boolean backwardSeek(Cell key) throws IOException { seek(key); if (cur == null - || getComparator().compareRows(cur.getRowArray(), cur.getRowOffset(), - cur.getRowLength(), key.getRowArray(), key.getRowOffset(), - key.getRowLength()) > 0) { + || getComparator().compareRows(cur, key) > 0) { return seekToPreviousRow(key); } return true; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java index 298d5bc..a2ca4e6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java @@ -44,7 +44,6 @@ import org.apache.hadoop.hbase.executor.ExecutorService; import org.apache.hadoop.hbase.filter.Filter; import org.apache.hadoop.hbase.regionserver.ScanQueryMatcher.MatchCode; import org.apache.hadoop.hbase.regionserver.handler.ParallelSeekHandler; -import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; /** @@ -485,18 +484,14 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner // only call setRow if the row changes; avoids confusing the query matcher // if scanning intra-row - byte[] row = peeked.getRowArray(); - int offset = peeked.getRowOffset(); - short length = peeked.getRowLength(); // If limit < 0 and remainingResultSize < 0 we can skip the row comparison because we know // the row has changed. Else it is possible we are still traversing the same row so we // must perform the row comparison. 
- if ((limit < 0 && remainingResultSize < 0) || matcher.row == null - || !Bytes.equals(row, offset, length, matcher.row, matcher.rowOffset, - matcher.rowLength)) { + if ((limit < 0 && remainingResultSize < 0) || matcher.rowCell == null + || !CellUtil.matchingRow(peeked, matcher.rowCell)) { this.countPerRow = 0; - matcher.setRow(row, offset, length); + matcher.setRowCell(peeked); } Cell cell; @@ -733,14 +728,10 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner if (kv == null) { kv = lastTopKey; } - byte[] row = kv.getRowArray(); - int offset = kv.getRowOffset(); - short length = kv.getRowLength(); - if ((matcher.row == null) || !Bytes.equals(row, offset, length, matcher.row, - matcher.rowOffset, matcher.rowLength)) { + if ((matcher.rowCell == null) || !CellUtil.matchingRow(kv, matcher.rowCell)) { this.countPerRow = 0; matcher.reset(); - matcher.setRow(row, offset, length); + matcher.setRowCell(kv); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlFilter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlFilter.java index 48a982e..07a19a4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlFilter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlFilter.java @@ -19,7 +19,9 @@ package org.apache.hadoop.hbase.security.access; import java.io.IOException; +import java.util.Iterator; import java.util.Map; +import java.util.Map.Entry; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.Cell; @@ -28,9 +30,6 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.filter.FilterBase; import org.apache.hadoop.hbase.security.User; -import org.apache.hadoop.hbase.util.ByteRange; -import org.apache.hadoop.hbase.util.Bytes; -import 
org.apache.hadoop.hbase.util.SimpleMutableByteRange; /** * NOTE: for internal use only by AccessController implementation @@ -62,11 +61,11 @@ class AccessControlFilter extends FilterBase { private User user; private boolean isSystemTable; private Strategy strategy; - private Map cfVsMaxVersions; + private Map cfVsMaxVersions; private int familyMaxVersions; private int currentVersions; - private ByteRange prevFam; - private ByteRange prevQual; + private Cell prevFamCell; + private Cell prevQualCell; /** * For Writable @@ -75,15 +74,13 @@ class AccessControlFilter extends FilterBase { } AccessControlFilter(TableAuthManager mgr, User ugi, TableName tableName, - Strategy strategy, Map cfVsMaxVersions) { + Strategy strategy, Map cfVsMaxVersions) { authManager = mgr; table = tableName; user = ugi; isSystemTable = tableName.isSystemTable(); this.strategy = strategy; this.cfVsMaxVersions = cfVsMaxVersions; - this.prevFam = new SimpleMutableByteRange(); - this.prevQual = new SimpleMutableByteRange(); } @Override @@ -91,21 +88,21 @@ class AccessControlFilter extends FilterBase { if (isSystemTable) { return ReturnCode.INCLUDE; } - if (prevFam.getBytes() == null - || (Bytes.compareTo(prevFam.getBytes(), prevFam.getOffset(), prevFam.getLength(), - cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength()) != 0)) { - prevFam.set(cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength()); - // Similar to VisibilityLabelFilter - familyMaxVersions = cfVsMaxVersions.get(prevFam); - // Family is changed. Just unset curQualifier. - prevQual.unset(); + if (prevFamCell == null || !CellUtil.matchingFamily(prevFamCell, cell)) { + prevFamCell = cell; + // Iterating here because the cfVsMaxVersions map would ideally be very small. 
+ Iterator> iterator = cfVsMaxVersions.entrySet().iterator(); + while (iterator.hasNext()) { + Entry entry = iterator.next(); + if (CellUtil.matchingFamily(cell, entry.getKey(), 0, entry.getKey().length)) { + familyMaxVersions = entry.getValue(); + break; + } + } + prevQualCell = null; } - if (prevQual.getBytes() == null - || (Bytes.compareTo(prevQual.getBytes(), prevQual.getOffset(), - prevQual.getLength(), cell.getQualifierArray(), cell.getQualifierOffset(), - cell.getQualifierLength()) != 0)) { - prevQual.set(cell.getQualifierArray(), cell.getQualifierOffset(), - cell.getQualifierLength()); + if (prevQualCell == null || !(CellUtil.matchingQualifier(prevQualCell, cell))) { + prevQualCell = cell; currentVersions = 0; } currentVersions++; @@ -113,6 +110,7 @@ class AccessControlFilter extends FilterBase { return ReturnCode.SKIP; } // XXX: Compare in place, don't clone + // TODO byte[] family = CellUtil.cloneFamily(cell); byte[] qualifier = CellUtil.cloneQualifier(cell); switch (strategy) { @@ -140,8 +138,8 @@ class AccessControlFilter extends FilterBase { @Override public void reset() throws IOException { - this.prevFam.unset(); - this.prevQual.unset(); + this.prevFamCell = null; + this.prevQualCell = null; this.familyMaxVersions = 0; this.currentVersions = 0; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java index 2bab7e8..80ff645 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java @@ -17,6 +17,7 @@ package org.apache.hadoop.hbase.security.access; import java.io.IOException; import java.net.InetAddress; import java.security.PrivilegedExceptionAction; +import java.util.ArrayList; import java.util.Collection; import java.util.HashMap; import java.util.Iterator; @@ -100,11 +101,9 @@ 
import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.security.UserProvider; import org.apache.hadoop.hbase.security.access.Permission.Action; import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils; -import org.apache.hadoop.hbase.util.ByteRange; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.Pair; -import org.apache.hadoop.hbase.util.SimpleMutableByteRange; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; import com.google.common.collect.ArrayListMultimap; @@ -792,16 +791,15 @@ public class AccessController extends BaseMasterAndRegionObserver // This Map is identical to familyMap. The key is a BR rather than byte[]. // It will be easy to do gets over this new Map as we can create get keys over the Cell cf by // new SimpleByteRange(cell.familyArray, cell.familyOffset, cell.familyLen) - Map> familyMap1 = new HashMap>(); + Map> familyMap1 = new HashMap>(); for (Entry> entry : familyMap.entrySet()) { if (entry.getValue() instanceof List) { - familyMap1.put(new SimpleMutableByteRange(entry.getKey()), (List) entry.getValue()); + familyMap1.put(entry.getKey(), (List) entry.getValue()); } } RegionScanner scanner = getRegion(e).getScanner(new Scan(get)); List cells = Lists.newArrayList(); Cell prevCell = null; - ByteRange curFam = new SimpleMutableByteRange(); boolean curColAllVersions = (request == OpType.DELETE); long curColCheckTs = opTs; boolean foundColumn = false; @@ -822,8 +820,15 @@ public class AccessController extends BaseMasterAndRegionObserver continue; } if (colChange && considerCellTs) { - curFam.set(cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength()); - List cols = familyMap1.get(curFam); + List cols = new ArrayList(); + Iterator>> iterator = familyMap1.entrySet().iterator(); + while (iterator.hasNext()) { + Entry> entry = iterator.next(); + if (CellUtil.matchingFamily(cell, entry.getKey())) { + cols = 
entry.getValue(); + break; + } + } for (Cell col : cols) { // null/empty qualifier is used to denote a Family delete. The TS and delete type // associated with this is applicable for all columns within the family. That is @@ -1466,9 +1471,9 @@ public class AccessController extends BaseMasterAndRegionObserver AuthResult authResult = permissionGranted(opType, user, env, families, Action.READ); Region region = getRegion(env); TableName table = getTableName(region); - Map cfVsMaxVersions = Maps.newHashMap(); + Map cfVsMaxVersions = Maps.newHashMap(); for (HColumnDescriptor hcd : region.getTableDesc().getFamilies()) { - cfVsMaxVersions.put(new SimpleMutableByteRange(hcd.getName()), hcd.getMaxVersions()); + cfVsMaxVersions.put(hcd.getName(), hcd.getMaxVersions()); } if (!authResult.isAllowed()) { if (!cellFeaturesEnabled || compatibleEarlyTermination) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/DefaultVisibilityLabelServiceImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/DefaultVisibilityLabelServiceImpl.java index 34ccb4a..cfce51d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/DefaultVisibilityLabelServiceImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/DefaultVisibilityLabelServiceImpl.java @@ -176,11 +176,10 @@ public class DefaultVisibilityLabelServiceImpl implements VisibilityLabelService Map> userAuths = new HashMap>(); for (List cells : labelDetails) { for (Cell cell : cells) { - if (Bytes.equals(cell.getQualifierArray(), cell.getQualifierOffset(), - cell.getQualifierLength(), LABEL_QUALIFIER, 0, LABEL_QUALIFIER.length)) { + if (CellUtil.matchingQualifier(cell, LABEL_QUALIFIER)) { labels.put( Bytes.toString(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()), - Bytes.toInt(cell.getRowArray(), cell.getRowOffset())); + CellUtil.getRowAsInt(cell)); } else { // These are user cells who has authorization 
for this label String user = Bytes.toString(cell.getQualifierArray(), cell.getQualifierOffset(), @@ -190,7 +189,7 @@ public class DefaultVisibilityLabelServiceImpl implements VisibilityLabelService auths = new ArrayList(); userAuths.put(user, auths); } - auths.add(Bytes.toInt(cell.getRowArray(), cell.getRowOffset())); + auths.add(CellUtil.getRowAsInt(cell)); } } } @@ -346,7 +345,7 @@ public class DefaultVisibilityLabelServiceImpl implements VisibilityLabelService scanner.next(results); if (results.isEmpty()) break; Cell cell = results.get(0); - int ordinal = Bytes.toInt(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength()); + int ordinal = CellUtil.getRowAsInt(cell); String label = this.labelsCache.getLabel(ordinal); if (label != null) { auths.add(label); @@ -383,7 +382,7 @@ public class DefaultVisibilityLabelServiceImpl implements VisibilityLabelService scanner.next(results); if (results.isEmpty()) break; Cell cell = results.get(0); - int ordinal = Bytes.toInt(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength()); + int ordinal = CellUtil.getRowAsInt(cell); String label = this.labelsCache.getLabel(ordinal); if (label != null) { auths.add(label); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityLabelFilter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityLabelFilter.java index eb8abbe..bdfa388 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityLabelFilter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityLabelFilter.java @@ -18,14 +18,14 @@ package org.apache.hadoop.hbase.security.visibility; import java.io.IOException; +import java.util.Iterator; import java.util.Map; +import java.util.Map.Entry; -import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellUtil; +import 
org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.filter.FilterBase; -import org.apache.hadoop.hbase.util.ByteRange; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.SimpleMutableByteRange; /** * This Filter checks the visibility expression with each KV against visibility labels associated @@ -35,39 +35,38 @@ import org.apache.hadoop.hbase.util.SimpleMutableByteRange; class VisibilityLabelFilter extends FilterBase { private final VisibilityExpEvaluator expEvaluator; - private final Map cfVsMaxVersions; - private final ByteRange curFamily; - private final ByteRange curQualifier; + private final Map cfVsMaxVersions; + private Cell curFamCell; + private Cell curQualCell; private int curFamilyMaxVersions; private int curQualMetVersions; public VisibilityLabelFilter(VisibilityExpEvaluator expEvaluator, - Map cfVsMaxVersions) { + Map cfVsMaxVersions) { this.expEvaluator = expEvaluator; this.cfVsMaxVersions = cfVsMaxVersions; - this.curFamily = new SimpleMutableByteRange(); - this.curQualifier = new SimpleMutableByteRange(); } @Override public ReturnCode filterKeyValue(Cell cell) throws IOException { - if (curFamily.getBytes() == null - || (Bytes.compareTo(curFamily.getBytes(), curFamily.getOffset(), curFamily.getLength(), - cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength()) != 0)) { - curFamily.set(cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength()); - // For this family, all the columns can have max of curFamilyMaxVersions versions. No need to - // consider the older versions for visibility label check. - // Ideally this should have been done at a lower layer by HBase (?) - curFamilyMaxVersions = cfVsMaxVersions.get(curFamily); - // Family is changed. Just unset curQualifier. 
- curQualifier.unset(); + if (curFamCell == null + || !CellUtil.matchingFamily(curFamCell, cell)) { + curFamCell = cell; + // Iterating here because the cfVsMaxVersions map would ideally be very small. + Iterator> iterator = cfVsMaxVersions.entrySet().iterator(); + while (iterator.hasNext()) { + Entry entry = iterator.next(); + if (CellUtil.matchingFamily(cell, entry.getKey(), 0, + entry.getKey().length)) { + curFamilyMaxVersions = entry.getValue(); + break; + } + } + curQualCell = null; } - if (curQualifier.getBytes() == null - || (Bytes.compareTo(curQualifier.getBytes(), curQualifier.getOffset(), - curQualifier.getLength(), cell.getQualifierArray(), cell.getQualifierOffset(), - cell.getQualifierLength()) != 0)) { - curQualifier.set(cell.getQualifierArray(), cell.getQualifierOffset(), - cell.getQualifierLength()); + if (curQualCell == null + || !(CellUtil.matchingQualifier(curQualCell, cell))) { + curQualCell = cell; curQualMetVersions = 0; } curQualMetVersions++; @@ -80,8 +79,8 @@ class VisibilityLabelFilter extends FilterBase { @Override public void reset() throws IOException { - this.curFamily.unset(); - this.curQualifier.unset(); + this.curFamCell = null; + this.curQualCell = null; this.curFamilyMaxVersions = 0; this.curQualMetVersions = 0; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityScanDeleteTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityScanDeleteTracker.java index 80e1d5d..b2b304b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityScanDeleteTracker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityScanDeleteTracker.java @@ -31,6 +31,8 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellComparator; 
+import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValue.Type; import org.apache.hadoop.hbase.Tag; @@ -75,8 +77,6 @@ public class VisibilityScanDeleteTracker extends ScanDeleteTracker { public void add(Cell delCell) { //Cannot call super.add because need to find if the delete needs to be considered long timestamp = delCell.getTimestamp(); - int qualifierOffset = delCell.getQualifierOffset(); - int qualifierLength = delCell.getQualifierLength(); byte type = delCell.getTypeByte(); if (type == KeyValue.Type.DeleteFamily.getCode()) { hasFamilyStamp = true; @@ -89,9 +89,8 @@ public class VisibilityScanDeleteTracker extends ScanDeleteTracker { return; } // new column, or more general delete type - if (deleteBuffer != null) { - if (Bytes.compareTo(deleteBuffer, deleteOffset, deleteLength, delCell.getQualifierArray(), - qualifierOffset, qualifierLength) != 0) { + if (deleteCell != null) { + if (!CellUtil.matchingQualifier(deleteCell, delCell)) { // A case where there are deletes for a column qualifier but there are // no corresponding puts for them. Rare case. 
visibilityTagsDeleteColumns = null; @@ -107,9 +106,7 @@ public class VisibilityScanDeleteTracker extends ScanDeleteTracker { visiblityTagsDeleteColumnVersion = null; } } - deleteBuffer = delCell.getQualifierArray(); - deleteOffset = qualifierOffset; - deleteLength = qualifierLength; + deleteCell = delCell; deleteType = type; deleteTimestamp = timestamp; extractDeleteCellVisTags(delCell, KeyValue.Type.codeToType(type)); @@ -238,9 +235,8 @@ public class VisibilityScanDeleteTracker extends ScanDeleteTracker { } } } - if (deleteBuffer != null) { - int ret = Bytes.compareTo(deleteBuffer, deleteOffset, deleteLength, - cell.getQualifierArray(), qualifierOffset, qualifierLength); + if (deleteCell != null) { + int ret = CellComparator.compareQualifiers(deleteCell, cell); if (ret == 0) { if (deleteType == KeyValue.Type.DeleteColumn.getCode()) { @@ -289,12 +285,12 @@ public class VisibilityScanDeleteTracker extends ScanDeleteTracker { } } else if (ret < 0) { // Next column case. - deleteBuffer = null; + deleteCell = null; visibilityTagsDeleteColumns = null; visiblityTagsDeleteColumnVersion = null; } else { throw new IllegalStateException("isDeleted failed: deleteBuffer=" - + Bytes.toStringBinary(deleteBuffer, deleteOffset, deleteLength) + ", qualifier=" + + Bytes.toStringBinary(deleteCell.getQualifierArray(), deleteCell.getQualifierOffset(), deleteCell.getQualifierLength()) + ", qualifier=" + Bytes.toStringBinary(cell.getQualifierArray(), qualifierOffset, qualifierLength) + ", timestamp=" + timestamp + ", comparison result: " + ret); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityUtils.java index 916a34c..d1dd400 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityUtils.java @@ -58,11 +58,9 @@ 
import org.apache.hadoop.hbase.security.visibility.expression.ExpressionNode; import org.apache.hadoop.hbase.security.visibility.expression.LeafExpressionNode; import org.apache.hadoop.hbase.security.visibility.expression.NonLeafExpressionNode; import org.apache.hadoop.hbase.security.visibility.expression.Operator; -import org.apache.hadoop.hbase.util.ByteRange; import org.apache.hadoop.hbase.util.ByteStringer; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Pair; -import org.apache.hadoop.hbase.util.SimpleMutableByteRange; import org.apache.hadoop.util.ReflectionUtils; import com.google.protobuf.InvalidProtocolBufferException; @@ -310,9 +308,9 @@ public class VisibilityUtils { public static Filter createVisibilityLabelFilter(Region region, Authorizations authorizations) throws IOException { - Map cfVsMaxVersions = new HashMap(); + Map cfVsMaxVersions = new HashMap(); for (HColumnDescriptor hcd : region.getTableDesc().getFamilies()) { - cfVsMaxVersions.put(new SimpleMutableByteRange(hcd.getName()), hcd.getMaxVersions()); + cfVsMaxVersions.put(hcd.getName(), hcd.getMaxVersions()); } VisibilityLabelService vls = VisibilityLabelServiceManager.getInstance() .getVisibilityLabelService(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestExplicitColumnTracker.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestExplicitColumnTracker.java index 020781c..0d35b71 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestExplicitColumnTracker.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestExplicitColumnTracker.java @@ -59,7 +59,9 @@ public class TestExplicitColumnTracker { long timestamp = 0; //"Match" for(byte [] col : scannerColumns){ - result.add(ScanQueryMatcher.checkColumn(exp, col, 0, col.length, ++timestamp, + KeyValue kv = KeyValueUtil.createFirstOnRow(HConstants.EMPTY_BYTE_ARRAY, 0, 0, + HConstants.EMPTY_BYTE_ARRAY, 0, 0, col, 0, 
col.length); + result.add(ScanQueryMatcher.checkColumn(exp, kv, ++timestamp, KeyValue.Type.Put.getCode(), false)); } @@ -162,14 +164,18 @@ public class TestExplicitColumnTracker { Long.MIN_VALUE); for (int i = 0; i < 100000; i+=2) { byte [] col = Bytes.toBytes("col"+i); - ScanQueryMatcher.checkColumn(explicit, col, 0, col.length, 1, KeyValue.Type.Put.getCode(), + KeyValue kv = KeyValueUtil.createFirstOnRow(HConstants.EMPTY_BYTE_ARRAY, 0, 0, + HConstants.EMPTY_BYTE_ARRAY, 0, 0, col, 0, col.length); + ScanQueryMatcher.checkColumn(explicit, kv, 1, KeyValue.Type.Put.getCode(), false); } explicit.reset(); for (int i = 1; i < 100000; i+=2) { byte [] col = Bytes.toBytes("col"+i); - ScanQueryMatcher.checkColumn(explicit, col, 0, col.length, 1, KeyValue.Type.Put.getCode(), + KeyValue kv = KeyValueUtil.createFirstOnRow(HConstants.EMPTY_BYTE_ARRAY, 0, 0, + HConstants.EMPTY_BYTE_ARRAY, 0, 0, col, 0, col.length); + ScanQueryMatcher.checkColumn(explicit, kv, 1, KeyValue.Type.Put.getCode(), false); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanWildcardColumnTracker.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanWildcardColumnTracker.java index c0dcee6..a8ed289 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanWildcardColumnTracker.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanWildcardColumnTracker.java @@ -56,8 +56,10 @@ public class TestScanWildcardColumnTracker extends HBaseTestCase { List actual = new ArrayList(); for(byte [] qualifier : qualifiers) { + KeyValue kv = KeyValueUtil.createFirstOnRow(HConstants.EMPTY_BYTE_ARRAY, 0, 0, + HConstants.EMPTY_BYTE_ARRAY, 0, 0, qualifier, 0, qualifier.length); ScanQueryMatcher.MatchCode mc = - ScanQueryMatcher.checkColumn(tracker, qualifier, 0, qualifier.length, 1, + ScanQueryMatcher.checkColumn(tracker, kv, 1, KeyValue.Type.Put.getCode(), false); actual.add(mc); } @@ -90,8 +92,10 @@ public 
class TestScanWildcardColumnTracker extends HBaseTestCase { long timestamp = 0; for(byte [] qualifier : qualifiers) { + KeyValue kv = KeyValueUtil.createFirstOnRow(HConstants.EMPTY_BYTE_ARRAY, 0, 0, + HConstants.EMPTY_BYTE_ARRAY, 0, 0, qualifier, 0, qualifier.length); MatchCode mc = - ScanQueryMatcher.checkColumn(tracker, qualifier, 0, qualifier.length, ++timestamp, + ScanQueryMatcher.checkColumn(tracker, kv, ++timestamp, KeyValue.Type.Put.getCode(), false); actual.add(mc); } @@ -115,7 +119,9 @@ public class TestScanWildcardColumnTracker extends HBaseTestCase { try { for(byte [] qualifier : qualifiers) { - ScanQueryMatcher.checkColumn(tracker, qualifier, 0, qualifier.length, 1, + KeyValue kv = KeyValueUtil.createFirstOnRow(HConstants.EMPTY_BYTE_ARRAY, 0, 0, + HConstants.EMPTY_BYTE_ARRAY, 0, 0, qualifier, 0, qualifier.length); + ScanQueryMatcher.checkColumn(tracker, kv, 1, KeyValue.Type.Put.getCode(), false); } } catch (Exception e) {