From 95c0e0801705630b8477a0ac9c8bdc09f5c5839e Mon Sep 17 00:00:00 2001 From: Ashish Singhi Date: Thu, 16 Jul 2015 17:07:36 +0530 Subject: [PATCH] HBASE-13954 Remove HTableInterface#getRowOrBefore related server side code --- .../java/org/apache/hadoop/hbase/client/Get.java | 14 +- .../apache/hadoop/hbase/protobuf/ProtobufUtil.java | 33 -- .../hadoop/hbase/protobuf/RequestConverter.java | 30 -- .../hadoop/hbase/client/TestClientNoCluster.java | 8 - .../org/apache/hadoop/hbase/client/TestGet.java | 3 - .../hbase/protobuf/generated/ClientProtos.java | 395 +++++++-------------- hbase-protocol/src/main/protobuf/Client.proto | 7 +- .../hbase/coprocessor/BaseRegionObserver.java | 34 +- .../hadoop/hbase/coprocessor/RegionObserver.java | 84 ++--- .../hadoop/hbase/regionserver/DefaultMemStore.java | 118 +----- .../regionserver/GetClosestRowBeforeTracker.java | 242 ------------- .../apache/hadoop/hbase/regionserver/HRegion.java | 32 -- .../apache/hadoop/hbase/regionserver/HStore.java | 148 -------- .../apache/hadoop/hbase/regionserver/MemStore.java | 7 - .../hadoop/hbase/regionserver/RSRpcServices.java | 38 +- .../apache/hadoop/hbase/regionserver/Region.java | 11 - .../hbase/regionserver/RegionCoprocessorHost.java | 35 -- .../apache/hadoop/hbase/regionserver/Store.java | 14 - .../hbase/security/access/AccessController.java | 23 -- .../apache/hadoop/hbase/HBaseTestingUtility.java | 25 ++ .../hadoop/hbase/client/TestFromClientSide.java | 75 ++-- .../hbase/client/TestFromClientSideNoCodec.java | 1 - .../hbase/coprocessor/SimpleRegionObserver.java | 36 -- .../regionserver/TestGetClosestAtOrBefore.java | 43 +-- .../hadoop/hbase/regionserver/TestMinVersions.java | 8 +- .../access/TestWithDisabledAuthorization.java | 10 - 26 files changed, 282 insertions(+), 1192 deletions(-) delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/GetClosestRowBeforeTracker.java diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java index abf6b1c..b0e8992 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java @@ -110,7 +110,6 @@ public class Get extends Query this.storeOffset = get.getRowOffsetPerColumnFamily(); this.tr = get.getTimeRange(); this.checkExistenceOnly = get.isCheckExistenceOnly(); - this.closestRowBefore = get.isClosestRowBefore(); Map> fams = get.getFamilyMap(); for (Map.Entry> entry : fams.entrySet()) { byte [] fam = entry.getKey(); @@ -137,12 +136,23 @@ public class Get extends Query return this; } + /** + * This will always return the default value which is false as client cannot set the value to this + * property any more. + * @deprecated since 2.0.0 + */ + @Deprecated public boolean isClosestRowBefore() { return closestRowBefore; } + /** + * This is not used any more and does nothing. Use reverse scan instead. 
+ * @deprecated since 2.0.0 + */ + @Deprecated public Get setClosestRowBefore(boolean closestRowBefore) { - this.closestRowBefore = closestRowBefore; + // do Nothing return this; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java index b72f0bb..fd080dd 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java @@ -489,9 +489,6 @@ public final class ProtobufUtil { if (proto.hasExistenceOnly() && proto.getExistenceOnly()){ get.setCheckExistenceOnly(true); } - if (proto.hasClosestRowBefore() && proto.getClosestRowBefore()){ - get.setClosestRowBefore(true); - } if (proto.hasConsistency()) { get.setConsistency(toConsistency(proto.getConsistency())); } @@ -1077,9 +1074,6 @@ public final class ProtobufUtil { if (get.isCheckExistenceOnly()){ builder.setExistenceOnly(true); } - if (get.isClosestRowBefore()){ - builder.setClosestRowBefore(true); - } if (get.getConsistency() != null && get.getConsistency() != Consistency.STRONG) { builder.setConsistency(toConsistency(get.getConsistency())); } @@ -1550,33 +1544,6 @@ public final class ProtobufUtil { // Start helpers for Client /** - * A helper to get a row of the closet one before using client protocol. - * - * @param client - * @param regionName - * @param row - * @param family - * @return the row or the closestRowBefore if it doesn't exist - * @throws IOException - * @deprecated since 0.99 - use reversed scanner instead. - */ - @Deprecated - public static Result getRowOrBefore(final ClientService.BlockingInterface client, - final byte[] regionName, final byte[] row, - final byte[] family) throws IOException { - GetRequest request = - RequestConverter.buildGetRowOrBeforeRequest( - regionName, row, family); - try { - GetResponse response = client.get(null, request); - if (!response.hasResult()) return null; - return toResult(response.getResult()); - } catch (ServiceException se) { - throw getRemoteException(se); - } - } - - /** * A helper to bulk load a list of HFiles using client protocol. * * @param client diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java index 96260fd..c111be2 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java @@ -64,7 +64,6 @@ import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionReques import org.apache.hadoop.hbase.protobuf.generated.ClientProtos; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileRequest; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileRequest.FamilyPath; -import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Column; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Condition; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetRequest; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutateRequest; @@ -123,35 +122,6 @@ public final class RequestConverter { // Start utilities for Client -/** - * Create a new protocol buffer GetRequest to get a row, all columns in a family. - * If there is no such row, return the closest row before it. 
- * - * @param regionName the name of the region to get - * @param row the row to get - * @param family the column family to get - * should return the immediate row before - * @return a protocol buffer GetReuqest - */ - public static GetRequest buildGetRowOrBeforeRequest( - final byte[] regionName, final byte[] row, final byte[] family) { - GetRequest.Builder builder = GetRequest.newBuilder(); - RegionSpecifier region = buildRegionSpecifier( - RegionSpecifierType.REGION_NAME, regionName); - builder.setRegion(region); - - Column.Builder columnBuilder = Column.newBuilder(); - columnBuilder.setFamily(ByteStringer.wrap(family)); - ClientProtos.Get.Builder getBuilder = - ClientProtos.Get.newBuilder(); - getBuilder.setRow(ByteStringer.wrap(row)); - getBuilder.addColumn(columnBuilder.build()); - getBuilder.setClosestRowBefore(true); - builder.setGet(getBuilder.build()); - return builder.build(); - } - - /** * Create a protocol buffer GetRequest for a client Get * diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java index 66a80b0..e941440 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java @@ -514,14 +514,6 @@ public class TestClientNoCluster extends Configured implements Tool { ClientProtos.Result.Builder resultBuilder = ClientProtos.Result.newBuilder(); ByteString row = request.getGet().getRow(); Pair p = meta.get(row.toByteArray()); - if (p == null) { - if (request.getGet().getClosestRowBefore()) { - byte [] bytes = row.toByteArray(); - SortedMap> head = - bytes != null? meta.headMap(bytes): meta; - p = head == null? 
null: head.get(head.lastKey()); - } - } if (p != null) { resultBuilder.addCell(getRegionInfo(row, p.getFirst())); resultBuilder.addCell(getServer(row, p.getSecond())); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestGet.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestGet.java index 0e18ecf..f370751 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestGet.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestGet.java @@ -168,7 +168,6 @@ public class TestGet { get.setReplicaId(2); get.setIsolationLevel(IsolationLevel.READ_UNCOMMITTED); get.setCheckExistenceOnly(true); - get.setClosestRowBefore(true); get.setTimeRange(3, 4); get.setMaxVersions(11); get.setMaxResultsPerColumnFamily(10); @@ -191,9 +190,7 @@ public class TestGet { // from Get class assertEquals(get.isCheckExistenceOnly(), copyGet.isCheckExistenceOnly()); - assertEquals(get.isClosestRowBefore(), copyGet.isClosestRowBefore()); assertTrue(get.getTimeRange().equals(copyGet.getTimeRange())); - assertEquals(get.isClosestRowBefore(), copyGet.isClosestRowBefore()); assertEquals(get.getMaxVersions(), copyGet.getMaxVersions()); assertEquals(get.getMaxResultsPerColumnFamily(), copyGet.getMaxResultsPerColumnFamily()); assertEquals(get.getRowOffsetPerColumnFamily(), copyGet.getRowOffsetPerColumnFamily()); diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java index c90a625..c4b1eec 100644 --- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java +++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java @@ -1926,26 +1926,6 @@ public final class ClientProtos { */ boolean getExistenceOnly(); - // optional bool closest_row_before = 11 [default = false]; - /** - * optional bool closest_row_before = 11 [default = false]; - * - *
-     * If the row to get doesn't exist, return the
-     * closest row before.
-     * </pre>
- */ - boolean hasClosestRowBefore(); - /** - * optional bool closest_row_before = 11 [default = false]; - * - *
-     * If the row to get doesn't exist, return the
-     * closest row before.
-     * </pre>
- */ - boolean getClosestRowBefore(); - // optional .hbase.pb.Consistency consistency = 12 [default = STRONG]; /** * optional .hbase.pb.Consistency consistency = 12 [default = STRONG]; @@ -1963,8 +1943,7 @@ public final class ClientProtos { ** * The protocol buffer version of Get. * Unless existence_only is specified, return all the requested data - * for the row that matches exactly, or the one that immediately - * precedes it if closest_row_before is specified. + * for the row that matches exactly. * */ public static final class Get extends @@ -2087,18 +2066,13 @@ public final class ClientProtos { existenceOnly_ = input.readBool(); break; } - case 88: { - bitField0_ |= 0x00000100; - closestRowBefore_ = input.readBool(); - break; - } case 96: { int rawValue = input.readEnum(); org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Consistency value = org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Consistency.valueOf(rawValue); if (value == null) { unknownFields.mergeVarintField(12, rawValue); } else { - bitField0_ |= 0x00000200; + bitField0_ |= 0x00000100; consistency_ = value; } break; @@ -2371,32 +2345,6 @@ public final class ClientProtos { return existenceOnly_; } - // optional bool closest_row_before = 11 [default = false]; - public static final int CLOSEST_ROW_BEFORE_FIELD_NUMBER = 11; - private boolean closestRowBefore_; - /** - * optional bool closest_row_before = 11 [default = false]; - * - *
-     * If the row to get doesn't exist, return the
-     * closest row before.
-     * </pre>
- */ - public boolean hasClosestRowBefore() { - return ((bitField0_ & 0x00000100) == 0x00000100); - } - /** - * optional bool closest_row_before = 11 [default = false]; - * - *
-     * If the row to get doesn't exist, return the
-     * closest row before.
-     * </pre>
- */ - public boolean getClosestRowBefore() { - return closestRowBefore_; - } - // optional .hbase.pb.Consistency consistency = 12 [default = STRONG]; public static final int CONSISTENCY_FIELD_NUMBER = 12; private org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Consistency consistency_; @@ -2404,7 +2352,7 @@ public final class ClientProtos { * optional .hbase.pb.Consistency consistency = 12 [default = STRONG]; */ public boolean hasConsistency() { - return ((bitField0_ & 0x00000200) == 0x00000200); + return ((bitField0_ & 0x00000100) == 0x00000100); } /** * optional .hbase.pb.Consistency consistency = 12 [default = STRONG]; @@ -2424,7 +2372,6 @@ public final class ClientProtos { storeLimit_ = 0; storeOffset_ = 0; existenceOnly_ = false; - closestRowBefore_ = false; consistency_ = org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Consistency.STRONG; } private byte memoizedIsInitialized = -1; @@ -2492,9 +2439,6 @@ public final class ClientProtos { output.writeBool(10, existenceOnly_); } if (((bitField0_ & 0x00000100) == 0x00000100)) { - output.writeBool(11, closestRowBefore_); - } - if (((bitField0_ & 0x00000200) == 0x00000200)) { output.writeEnum(12, consistency_.getNumber()); } getUnknownFields().writeTo(output); @@ -2548,10 +2492,6 @@ public final class ClientProtos { } if (((bitField0_ & 0x00000100) == 0x00000100)) { size += com.google.protobuf.CodedOutputStream - .computeBoolSize(11, closestRowBefore_); - } - if (((bitField0_ & 0x00000200) == 0x00000200)) { - size += com.google.protobuf.CodedOutputStream .computeEnumSize(12, consistency_.getNumber()); } size += getUnknownFields().getSerializedSize(); @@ -2621,11 +2561,6 @@ public final class ClientProtos { result = result && (getExistenceOnly() == other.getExistenceOnly()); } - result = result && (hasClosestRowBefore() == other.hasClosestRowBefore()); - if (hasClosestRowBefore()) { - result = result && (getClosestRowBefore() - == other.getClosestRowBefore()); - } result = result && (hasConsistency() == other.hasConsistency()); if (hasConsistency()) { result = result && @@ -2684,10 +2619,6 @@ public final class ClientProtos { hash = (37 * hash) + EXISTENCE_ONLY_FIELD_NUMBER; hash = (53 * hash) + hashBoolean(getExistenceOnly()); } - if (hasClosestRowBefore()) { - hash = (37 * hash) + CLOSEST_ROW_BEFORE_FIELD_NUMBER; - hash = (53 * hash) + hashBoolean(getClosestRowBefore()); - } if (hasConsistency()) { hash = (37 * hash) + CONSISTENCY_FIELD_NUMBER; hash = (53 * hash) + hashEnum(getConsistency()); @@ -2770,8 +2701,7 @@ public final class ClientProtos { ** * The protocol buffer version of Get. * Unless existence_only is specified, return all the requested data - * for the row that matches exactly, or the one that immediately - * precedes it if closest_row_before is specified. + * for the row that matches exactly. 
* */ public static final class Builder extends @@ -2849,10 +2779,8 @@ public final class ClientProtos { bitField0_ = (bitField0_ & ~0x00000100); existenceOnly_ = false; bitField0_ = (bitField0_ & ~0x00000200); - closestRowBefore_ = false; - bitField0_ = (bitField0_ & ~0x00000400); consistency_ = org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Consistency.STRONG; - bitField0_ = (bitField0_ & ~0x00000800); + bitField0_ = (bitField0_ & ~0x00000400); return this; } @@ -2942,10 +2870,6 @@ public final class ClientProtos { if (((from_bitField0_ & 0x00000400) == 0x00000400)) { to_bitField0_ |= 0x00000100; } - result.closestRowBefore_ = closestRowBefore_; - if (((from_bitField0_ & 0x00000800) == 0x00000800)) { - to_bitField0_ |= 0x00000200; - } result.consistency_ = consistency_; result.bitField0_ = to_bitField0_; onBuilt(); @@ -3039,9 +2963,6 @@ public final class ClientProtos { if (other.hasExistenceOnly()) { setExistenceOnly(other.getExistenceOnly()); } - if (other.hasClosestRowBefore()) { - setClosestRowBefore(other.getClosestRowBefore()); - } if (other.hasConsistency()) { setConsistency(other.getConsistency()); } @@ -4029,66 +3950,13 @@ public final class ClientProtos { return this; } - // optional bool closest_row_before = 11 [default = false]; - private boolean closestRowBefore_ ; - /** - * optional bool closest_row_before = 11 [default = false]; - * - *
-       * If the row to get doesn't exist, return the
-       * closest row before.
-       * </pre>
- */ - public boolean hasClosestRowBefore() { - return ((bitField0_ & 0x00000400) == 0x00000400); - } - /** - * optional bool closest_row_before = 11 [default = false]; - * - *
-       * If the row to get doesn't exist, return the
-       * closest row before.
-       * </pre>
- */ - public boolean getClosestRowBefore() { - return closestRowBefore_; - } - /** - * optional bool closest_row_before = 11 [default = false]; - * - *
-       * If the row to get doesn't exist, return the
-       * closest row before.
-       * </pre>
- */ - public Builder setClosestRowBefore(boolean value) { - bitField0_ |= 0x00000400; - closestRowBefore_ = value; - onChanged(); - return this; - } - /** - * optional bool closest_row_before = 11 [default = false]; - * - *
-       * If the row to get doesn't exist, return the
-       * closest row before.
-       * </pre>
- */ - public Builder clearClosestRowBefore() { - bitField0_ = (bitField0_ & ~0x00000400); - closestRowBefore_ = false; - onChanged(); - return this; - } - // optional .hbase.pb.Consistency consistency = 12 [default = STRONG]; private org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Consistency consistency_ = org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Consistency.STRONG; /** * optional .hbase.pb.Consistency consistency = 12 [default = STRONG]; */ public boolean hasConsistency() { - return ((bitField0_ & 0x00000800) == 0x00000800); + return ((bitField0_ & 0x00000400) == 0x00000400); } /** * optional .hbase.pb.Consistency consistency = 12 [default = STRONG]; @@ -4103,7 +3971,7 @@ public final class ClientProtos { if (value == null) { throw new NullPointerException(); } - bitField0_ |= 0x00000800; + bitField0_ |= 0x00000400; consistency_ = value; onChanged(); return this; @@ -4112,7 +3980,7 @@ public final class ClientProtos { * optional .hbase.pb.Consistency consistency = 12 [default = STRONG]; */ public Builder clearConsistency() { - bitField0_ = (bitField0_ & ~0x00000800); + bitField0_ = (bitField0_ & ~0x00000400); consistency_ = org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Consistency.STRONG; onChanged(); return this; @@ -33253,135 +33121,134 @@ public final class ClientProtos { "o\032\017MapReduce.proto\"\037\n\016Authorizations\022\r\n\005" + "label\030\001 \003(\t\"$\n\016CellVisibility\022\022\n\nexpress" + "ion\030\001 \002(\t\"+\n\006Column\022\016\n\006family\030\001 \002(\014\022\021\n\tq" + - "ualifier\030\002 \003(\014\"\201\003\n\003Get\022\013\n\003row\030\001 \002(\014\022 \n\006c" + + "ualifier\030\002 \003(\014\"\336\002\n\003Get\022\013\n\003row\030\001 \002(\014\022 \n\006c" + "olumn\030\002 \003(\0132\020.hbase.pb.Column\022*\n\tattribu" + "te\030\003 \003(\0132\027.hbase.pb.NameBytesPair\022 \n\006fil" + "ter\030\004 \001(\0132\020.hbase.pb.Filter\022\'\n\ntime_rang" + "e\030\005 \001(\0132\023.hbase.pb.TimeRange\022\027\n\014max_vers", "ions\030\006 \001(\r:\0011\022\032\n\014cache_blocks\030\007 \001(\010:\004tru" + "e\022\023\n\013store_limit\030\010 \001(\r\022\024\n\014store_offset\030\t" + - " \001(\r\022\035\n\016existence_only\030\n \001(\010:\005false\022!\n\022c" + - "losest_row_before\030\013 \001(\010:\005false\0222\n\013consis" + - "tency\030\014 \001(\0162\025.hbase.pb.Consistency:\006STRO" + - "NG\"\203\001\n\006Result\022\034\n\004cell\030\001 \003(\0132\016.hbase.pb.C" + - "ell\022\035\n\025associated_cell_count\030\002 \001(\005\022\016\n\006ex" + - "ists\030\003 \001(\010\022\024\n\005stale\030\004 \001(\010:\005false\022\026\n\007part" + - "ial\030\005 \001(\010:\005false\"S\n\nGetRequest\022)\n\006region" + - "\030\001 \002(\0132\031.hbase.pb.RegionSpecifier\022\032\n\003get", - "\030\002 \002(\0132\r.hbase.pb.Get\"/\n\013GetResponse\022 \n\006" + - "result\030\001 \001(\0132\020.hbase.pb.Result\"\222\001\n\tCondi" + - "tion\022\013\n\003row\030\001 \002(\014\022\016\n\006family\030\002 \002(\014\022\021\n\tqua" + - "lifier\030\003 \002(\014\022+\n\014compare_type\030\004 \002(\0162\025.hba" + - "se.pb.CompareType\022(\n\ncomparator\030\005 \002(\0132\024." + - "hbase.pb.Comparator\"\364\006\n\rMutationProto\022\013\n" + - "\003row\030\001 \001(\014\0229\n\013mutate_type\030\002 \001(\0162$.hbase." 
+ - "pb.MutationProto.MutationType\0229\n\014column_" + - "value\030\003 \003(\0132#.hbase.pb.MutationProto.Col" + - "umnValue\022\021\n\ttimestamp\030\004 \001(\004\022*\n\tattribute", - "\030\005 \003(\0132\027.hbase.pb.NameBytesPair\022C\n\ndurab" + - "ility\030\006 \001(\0162\".hbase.pb.MutationProto.Dur" + - "ability:\013USE_DEFAULT\022\'\n\ntime_range\030\007 \001(\013" + - "2\023.hbase.pb.TimeRange\022\035\n\025associated_cell" + - "_count\030\010 \001(\005\022\r\n\005nonce\030\t \001(\004\032\371\001\n\013ColumnVa" + - "lue\022\016\n\006family\030\001 \002(\014\022K\n\017qualifier_value\030\002" + - " \003(\01322.hbase.pb.MutationProto.ColumnValu" + - "e.QualifierValue\032\214\001\n\016QualifierValue\022\021\n\tq" + - "ualifier\030\001 \001(\014\022\r\n\005value\030\002 \001(\014\022\021\n\ttimesta" + - "mp\030\003 \001(\004\0227\n\013delete_type\030\004 \001(\0162\".hbase.pb", - ".MutationProto.DeleteType\022\014\n\004tags\030\005 \001(\014\"" + - "W\n\nDurability\022\017\n\013USE_DEFAULT\020\000\022\014\n\010SKIP_W" + - "AL\020\001\022\r\n\tASYNC_WAL\020\002\022\014\n\010SYNC_WAL\020\003\022\r\n\tFSY" + - "NC_WAL\020\004\">\n\014MutationType\022\n\n\006APPEND\020\000\022\r\n\t" + - "INCREMENT\020\001\022\007\n\003PUT\020\002\022\n\n\006DELETE\020\003\"p\n\nDele" + - "teType\022\026\n\022DELETE_ONE_VERSION\020\000\022\034\n\030DELETE" + - "_MULTIPLE_VERSIONS\020\001\022\021\n\rDELETE_FAMILY\020\002\022" + - "\031\n\025DELETE_FAMILY_VERSION\020\003\"\242\001\n\rMutateReq" + - "uest\022)\n\006region\030\001 \002(\0132\031.hbase.pb.RegionSp" + - "ecifier\022)\n\010mutation\030\002 \002(\0132\027.hbase.pb.Mut", - "ationProto\022&\n\tcondition\030\003 \001(\0132\023.hbase.pb" + - ".Condition\022\023\n\013nonce_group\030\004 \001(\004\"E\n\016Mutat" + - "eResponse\022 \n\006result\030\001 \001(\0132\020.hbase.pb.Res" + - "ult\022\021\n\tprocessed\030\002 \001(\010\"\346\003\n\004Scan\022 \n\006colum" + - "n\030\001 \003(\0132\020.hbase.pb.Column\022*\n\tattribute\030\002" + - " \003(\0132\027.hbase.pb.NameBytesPair\022\021\n\tstart_r" + - "ow\030\003 \001(\014\022\020\n\010stop_row\030\004 \001(\014\022 \n\006filter\030\005 \001" + - "(\0132\020.hbase.pb.Filter\022\'\n\ntime_range\030\006 \001(\013" + - "2\023.hbase.pb.TimeRange\022\027\n\014max_versions\030\007 " + - "\001(\r:\0011\022\032\n\014cache_blocks\030\010 \001(\010:\004true\022\022\n\nba", - "tch_size\030\t \001(\r\022\027\n\017max_result_size\030\n \001(\004\022" + - "\023\n\013store_limit\030\013 \001(\r\022\024\n\014store_offset\030\014 \001" + - "(\r\022&\n\036load_column_families_on_demand\030\r \001" + - "(\010\022\r\n\005small\030\016 \001(\010\022\027\n\010reversed\030\017 \001(\010:\005fal" + - "se\0222\n\013consistency\030\020 \001(\0162\025.hbase.pb.Consi" + - "stency:\006STRONG\022\017\n\007caching\030\021 \001(\r\"\220\002\n\013Scan" + - "Request\022)\n\006region\030\001 \001(\0132\031.hbase.pb.Regio" + - "nSpecifier\022\034\n\004scan\030\002 \001(\0132\016.hbase.pb.Scan" + - "\022\022\n\nscanner_id\030\003 \001(\004\022\026\n\016number_of_rows\030\004" + - " \001(\r\022\025\n\rclose_scanner\030\005 \001(\010\022\025\n\rnext_call", - "_seq\030\006 \001(\004\022\037\n\027client_handles_partials\030\007 " + - "\001(\010\022!\n\031client_handles_heartbeats\030\010 \001(\010\022\032" + - "\n\022track_scan_metrics\030\t \001(\010\"\232\002\n\014ScanRespo" + - "nse\022\030\n\020cells_per_result\030\001 \003(\r\022\022\n\nscanner" + - "_id\030\002 \001(\004\022\024\n\014more_results\030\003 
\001(\010\022\013\n\003ttl\030\004" + - " \001(\r\022!\n\007results\030\005 \003(\0132\020.hbase.pb.Result\022" + - "\r\n\005stale\030\006 \001(\010\022\037\n\027partial_flag_per_resul" + - "t\030\007 \003(\010\022\036\n\026more_results_in_region\030\010 \001(\010\022" + - "\031\n\021heartbeat_message\030\t \001(\010\022+\n\014scan_metri" + - "cs\030\n \001(\0132\025.hbase.pb.ScanMetrics\"\305\001\n\024Bulk", - "LoadHFileRequest\022)\n\006region\030\001 \002(\0132\031.hbase" + - ".pb.RegionSpecifier\022>\n\013family_path\030\002 \003(\013" + - "2).hbase.pb.BulkLoadHFileRequest.FamilyP" + - "ath\022\026\n\016assign_seq_num\030\003 \001(\010\032*\n\nFamilyPat" + - "h\022\016\n\006family\030\001 \002(\014\022\014\n\004path\030\002 \002(\t\"\'\n\025BulkL" + - "oadHFileResponse\022\016\n\006loaded\030\001 \002(\010\"a\n\026Copr" + - "ocessorServiceCall\022\013\n\003row\030\001 \002(\014\022\024\n\014servi" + - "ce_name\030\002 \002(\t\022\023\n\013method_name\030\003 \002(\t\022\017\n\007re" + - "quest\030\004 \002(\014\"B\n\030CoprocessorServiceResult\022" + - "&\n\005value\030\001 \001(\0132\027.hbase.pb.NameBytesPair\"", - "v\n\031CoprocessorServiceRequest\022)\n\006region\030\001" + - " \002(\0132\031.hbase.pb.RegionSpecifier\022.\n\004call\030" + - "\002 \002(\0132 .hbase.pb.CoprocessorServiceCall\"" + - "o\n\032CoprocessorServiceResponse\022)\n\006region\030" + - "\001 \002(\0132\031.hbase.pb.RegionSpecifier\022&\n\005valu" + - "e\030\002 \002(\0132\027.hbase.pb.NameBytesPair\"\226\001\n\006Act" + - "ion\022\r\n\005index\030\001 \001(\r\022)\n\010mutation\030\002 \001(\0132\027.h" + - "base.pb.MutationProto\022\032\n\003get\030\003 \001(\0132\r.hba" + - "se.pb.Get\0226\n\014service_call\030\004 \001(\0132 .hbase." + - "pb.CoprocessorServiceCall\"k\n\014RegionActio", - "n\022)\n\006region\030\001 \002(\0132\031.hbase.pb.RegionSpeci" + - "fier\022\016\n\006atomic\030\002 \001(\010\022 \n\006action\030\003 \003(\0132\020.h" + - "base.pb.Action\"D\n\017RegionLoadStats\022\027\n\014mem" + - "storeLoad\030\001 \001(\005:\0010\022\030\n\rheapOccupancy\030\002 \001(" + - "\005:\0010\"\332\001\n\021ResultOrException\022\r\n\005index\030\001 \001(" + - "\r\022 \n\006result\030\002 \001(\0132\020.hbase.pb.Result\022*\n\te" + - "xception\030\003 \001(\0132\027.hbase.pb.NameBytesPair\022" + - ":\n\016service_result\030\004 \001(\0132\".hbase.pb.Copro" + - "cessorServiceResult\022,\n\tloadStats\030\005 \001(\0132\031" + - ".hbase.pb.RegionLoadStats\"x\n\022RegionActio", - "nResult\0226\n\021resultOrException\030\001 \003(\0132\033.hba" + - "se.pb.ResultOrException\022*\n\texception\030\002 \001" + - "(\0132\027.hbase.pb.NameBytesPair\"x\n\014MultiRequ" + - "est\022,\n\014regionAction\030\001 \003(\0132\026.hbase.pb.Reg" + - "ionAction\022\022\n\nnonceGroup\030\002 \001(\004\022&\n\tconditi" + - "on\030\003 \001(\0132\023.hbase.pb.Condition\"\\\n\rMultiRe" + - "sponse\0228\n\022regionActionResult\030\001 \003(\0132\034.hba" + - "se.pb.RegionActionResult\022\021\n\tprocessed\030\002 " + - "\001(\010*\'\n\013Consistency\022\n\n\006STRONG\020\000\022\014\n\010TIMELI" + - "NE\020\0012\203\004\n\rClientService\0222\n\003Get\022\024.hbase.pb", - ".GetRequest\032\025.hbase.pb.GetResponse\022;\n\006Mu" + - "tate\022\027.hbase.pb.MutateRequest\032\030.hbase.pb" + - ".MutateResponse\0225\n\004Scan\022\025.hbase.pb.ScanR" + - "equest\032\026.hbase.pb.ScanResponse\022P\n\rBulkLo" + - "adHFile\022\036.hbase.pb.BulkLoadHFileRequest\032" + - "\037.hbase.pb.BulkLoadHFileResponse\022X\n\013Exec" + - 
"Service\022#.hbase.pb.CoprocessorServiceReq" + - "uest\032$.hbase.pb.CoprocessorServiceRespon" + - "se\022d\n\027ExecRegionServerService\022#.hbase.pb" + - ".CoprocessorServiceRequest\032$.hbase.pb.Co", - "processorServiceResponse\0228\n\005Multi\022\026.hbas" + - "e.pb.MultiRequest\032\027.hbase.pb.MultiRespon" + - "seBB\n*org.apache.hadoop.hbase.protobuf.g" + - "eneratedB\014ClientProtosH\001\210\001\001\240\001\001" + " \001(\r\022\035\n\016existence_only\030\n \001(\010:\005false\0222\n\013c" + + "onsistency\030\014 \001(\0162\025.hbase.pb.Consistency:" + + "\006STRONG\"\203\001\n\006Result\022\034\n\004cell\030\001 \003(\0132\016.hbase" + + ".pb.Cell\022\035\n\025associated_cell_count\030\002 \001(\005\022" + + "\016\n\006exists\030\003 \001(\010\022\024\n\005stale\030\004 \001(\010:\005false\022\026\n" + + "\007partial\030\005 \001(\010:\005false\"S\n\nGetRequest\022)\n\006r" + + "egion\030\001 \002(\0132\031.hbase.pb.RegionSpecifier\022\032" + + "\n\003get\030\002 \002(\0132\r.hbase.pb.Get\"/\n\013GetRespons", + "e\022 \n\006result\030\001 \001(\0132\020.hbase.pb.Result\"\222\001\n\t" + + "Condition\022\013\n\003row\030\001 \002(\014\022\016\n\006family\030\002 \002(\014\022\021" + + "\n\tqualifier\030\003 \002(\014\022+\n\014compare_type\030\004 \002(\0162" + + "\025.hbase.pb.CompareType\022(\n\ncomparator\030\005 \002" + + "(\0132\024.hbase.pb.Comparator\"\364\006\n\rMutationPro" + + "to\022\013\n\003row\030\001 \001(\014\0229\n\013mutate_type\030\002 \001(\0162$.h" + + "base.pb.MutationProto.MutationType\0229\n\014co" + + "lumn_value\030\003 \003(\0132#.hbase.pb.MutationProt" + + "o.ColumnValue\022\021\n\ttimestamp\030\004 \001(\004\022*\n\tattr" + + "ibute\030\005 \003(\0132\027.hbase.pb.NameBytesPair\022C\n\n", + "durability\030\006 \001(\0162\".hbase.pb.MutationProt" + + "o.Durability:\013USE_DEFAULT\022\'\n\ntime_range\030" + + "\007 \001(\0132\023.hbase.pb.TimeRange\022\035\n\025associated" + + "_cell_count\030\010 \001(\005\022\r\n\005nonce\030\t \001(\004\032\371\001\n\013Col" + + "umnValue\022\016\n\006family\030\001 \002(\014\022K\n\017qualifier_va" + + "lue\030\002 \003(\01322.hbase.pb.MutationProto.Colum" + + "nValue.QualifierValue\032\214\001\n\016QualifierValue" + + "\022\021\n\tqualifier\030\001 \001(\014\022\r\n\005value\030\002 \001(\014\022\021\n\tti" + + "mestamp\030\003 \001(\004\0227\n\013delete_type\030\004 \001(\0162\".hba" + + "se.pb.MutationProto.DeleteType\022\014\n\004tags\030\005", + " \001(\014\"W\n\nDurability\022\017\n\013USE_DEFAULT\020\000\022\014\n\010S" + + "KIP_WAL\020\001\022\r\n\tASYNC_WAL\020\002\022\014\n\010SYNC_WAL\020\003\022\r" + + "\n\tFSYNC_WAL\020\004\">\n\014MutationType\022\n\n\006APPEND\020" + + "\000\022\r\n\tINCREMENT\020\001\022\007\n\003PUT\020\002\022\n\n\006DELETE\020\003\"p\n" + + "\nDeleteType\022\026\n\022DELETE_ONE_VERSION\020\000\022\034\n\030D" + + "ELETE_MULTIPLE_VERSIONS\020\001\022\021\n\rDELETE_FAMI" + + "LY\020\002\022\031\n\025DELETE_FAMILY_VERSION\020\003\"\242\001\n\rMuta" + + "teRequest\022)\n\006region\030\001 \002(\0132\031.hbase.pb.Reg" + + "ionSpecifier\022)\n\010mutation\030\002 \002(\0132\027.hbase.p" + + "b.MutationProto\022&\n\tcondition\030\003 \001(\0132\023.hba", + "se.pb.Condition\022\023\n\013nonce_group\030\004 \001(\004\"E\n\016" + + "MutateResponse\022 \n\006result\030\001 \001(\0132\020.hbase.p" + + "b.Result\022\021\n\tprocessed\030\002 \001(\010\"\346\003\n\004Scan\022 \n\006" + + "column\030\001 \003(\0132\020.hbase.pb.Column\022*\n\tattrib" + + "ute\030\002 
\003(\0132\027.hbase.pb.NameBytesPair\022\021\n\tst" + + "art_row\030\003 \001(\014\022\020\n\010stop_row\030\004 \001(\014\022 \n\006filte" + + "r\030\005 \001(\0132\020.hbase.pb.Filter\022\'\n\ntime_range\030" + + "\006 \001(\0132\023.hbase.pb.TimeRange\022\027\n\014max_versio" + + "ns\030\007 \001(\r:\0011\022\032\n\014cache_blocks\030\010 \001(\010:\004true\022" + + "\022\n\nbatch_size\030\t \001(\r\022\027\n\017max_result_size\030\n", + " \001(\004\022\023\n\013store_limit\030\013 \001(\r\022\024\n\014store_offse" + + "t\030\014 \001(\r\022&\n\036load_column_families_on_deman" + + "d\030\r \001(\010\022\r\n\005small\030\016 \001(\010\022\027\n\010reversed\030\017 \001(\010" + + ":\005false\0222\n\013consistency\030\020 \001(\0162\025.hbase.pb." + + "Consistency:\006STRONG\022\017\n\007caching\030\021 \001(\r\"\220\002\n" + + "\013ScanRequest\022)\n\006region\030\001 \001(\0132\031.hbase.pb." + + "RegionSpecifier\022\034\n\004scan\030\002 \001(\0132\016.hbase.pb" + + ".Scan\022\022\n\nscanner_id\030\003 \001(\004\022\026\n\016number_of_r" + + "ows\030\004 \001(\r\022\025\n\rclose_scanner\030\005 \001(\010\022\025\n\rnext" + + "_call_seq\030\006 \001(\004\022\037\n\027client_handles_partia", + "ls\030\007 \001(\010\022!\n\031client_handles_heartbeats\030\010 " + + "\001(\010\022\032\n\022track_scan_metrics\030\t \001(\010\"\232\002\n\014Scan" + + "Response\022\030\n\020cells_per_result\030\001 \003(\r\022\022\n\nsc" + + "anner_id\030\002 \001(\004\022\024\n\014more_results\030\003 \001(\010\022\013\n\003" + + "ttl\030\004 \001(\r\022!\n\007results\030\005 \003(\0132\020.hbase.pb.Re" + + "sult\022\r\n\005stale\030\006 \001(\010\022\037\n\027partial_flag_per_" + + "result\030\007 \003(\010\022\036\n\026more_results_in_region\030\010" + + " \001(\010\022\031\n\021heartbeat_message\030\t \001(\010\022+\n\014scan_" + + "metrics\030\n \001(\0132\025.hbase.pb.ScanMetrics\"\305\001\n" + + "\024BulkLoadHFileRequest\022)\n\006region\030\001 \002(\0132\031.", + "hbase.pb.RegionSpecifier\022>\n\013family_path\030" + + "\002 \003(\0132).hbase.pb.BulkLoadHFileRequest.Fa" + + "milyPath\022\026\n\016assign_seq_num\030\003 \001(\010\032*\n\nFami" + + "lyPath\022\016\n\006family\030\001 \002(\014\022\014\n\004path\030\002 \002(\t\"\'\n\025" + + "BulkLoadHFileResponse\022\016\n\006loaded\030\001 \002(\010\"a\n" + + "\026CoprocessorServiceCall\022\013\n\003row\030\001 \002(\014\022\024\n\014" + + "service_name\030\002 \002(\t\022\023\n\013method_name\030\003 \002(\t\022" + + "\017\n\007request\030\004 \002(\014\"B\n\030CoprocessorServiceRe" + + "sult\022&\n\005value\030\001 \001(\0132\027.hbase.pb.NameBytes" + + "Pair\"v\n\031CoprocessorServiceRequest\022)\n\006reg", + "ion\030\001 \002(\0132\031.hbase.pb.RegionSpecifier\022.\n\004" + + "call\030\002 \002(\0132 .hbase.pb.CoprocessorService" + + "Call\"o\n\032CoprocessorServiceResponse\022)\n\006re" + + "gion\030\001 \002(\0132\031.hbase.pb.RegionSpecifier\022&\n" + + "\005value\030\002 \002(\0132\027.hbase.pb.NameBytesPair\"\226\001" + + "\n\006Action\022\r\n\005index\030\001 \001(\r\022)\n\010mutation\030\002 \001(" + + "\0132\027.hbase.pb.MutationProto\022\032\n\003get\030\003 \001(\0132" + + "\r.hbase.pb.Get\0226\n\014service_call\030\004 \001(\0132 .h" + + "base.pb.CoprocessorServiceCall\"k\n\014Region" + + "Action\022)\n\006region\030\001 \002(\0132\031.hbase.pb.Region", + "Specifier\022\016\n\006atomic\030\002 \001(\010\022 \n\006action\030\003 \003(" + + "\0132\020.hbase.pb.Action\"D\n\017RegionLoadStats\022\027" + + "\n\014memstoreLoad\030\001 
\001(\005:\0010\022\030\n\rheapOccupancy" + + "\030\002 \001(\005:\0010\"\332\001\n\021ResultOrException\022\r\n\005index" + + "\030\001 \001(\r\022 \n\006result\030\002 \001(\0132\020.hbase.pb.Result" + + "\022*\n\texception\030\003 \001(\0132\027.hbase.pb.NameBytes" + + "Pair\022:\n\016service_result\030\004 \001(\0132\".hbase.pb." + + "CoprocessorServiceResult\022,\n\tloadStats\030\005 " + + "\001(\0132\031.hbase.pb.RegionLoadStats\"x\n\022Region" + + "ActionResult\0226\n\021resultOrException\030\001 \003(\0132", + "\033.hbase.pb.ResultOrException\022*\n\texceptio" + + "n\030\002 \001(\0132\027.hbase.pb.NameBytesPair\"x\n\014Mult" + + "iRequest\022,\n\014regionAction\030\001 \003(\0132\026.hbase.p" + + "b.RegionAction\022\022\n\nnonceGroup\030\002 \001(\004\022&\n\tco" + + "ndition\030\003 \001(\0132\023.hbase.pb.Condition\"\\\n\rMu" + + "ltiResponse\0228\n\022regionActionResult\030\001 \003(\0132" + + "\034.hbase.pb.RegionActionResult\022\021\n\tprocess" + + "ed\030\002 \001(\010*\'\n\013Consistency\022\n\n\006STRONG\020\000\022\014\n\010T" + + "IMELINE\020\0012\203\004\n\rClientService\0222\n\003Get\022\024.hba" + + "se.pb.GetRequest\032\025.hbase.pb.GetResponse\022", + ";\n\006Mutate\022\027.hbase.pb.MutateRequest\032\030.hba" + + "se.pb.MutateResponse\0225\n\004Scan\022\025.hbase.pb." + + "ScanRequest\032\026.hbase.pb.ScanResponse\022P\n\rB" + + "ulkLoadHFile\022\036.hbase.pb.BulkLoadHFileReq" + + "uest\032\037.hbase.pb.BulkLoadHFileResponse\022X\n" + + "\013ExecService\022#.hbase.pb.CoprocessorServi" + + "ceRequest\032$.hbase.pb.CoprocessorServiceR" + + "esponse\022d\n\027ExecRegionServerService\022#.hba" + + "se.pb.CoprocessorServiceRequest\032$.hbase." + + "pb.CoprocessorServiceResponse\0228\n\005Multi\022\026", + ".hbase.pb.MultiRequest\032\027.hbase.pb.MultiR" + + "esponseBB\n*org.apache.hadoop.hbase.proto" + + "buf.generatedB\014ClientProtosH\001\210\001\001\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -33411,7 +33278,7 @@ public final class ClientProtos { internal_static_hbase_pb_Get_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_Get_descriptor, - new java.lang.String[] { "Row", "Column", "Attribute", "Filter", "TimeRange", "MaxVersions", "CacheBlocks", "StoreLimit", "StoreOffset", "ExistenceOnly", "ClosestRowBefore", "Consistency", }); + new java.lang.String[] { "Row", "Column", "Attribute", "Filter", "TimeRange", "MaxVersions", "CacheBlocks", "StoreLimit", "StoreOffset", "ExistenceOnly", "Consistency", }); internal_static_hbase_pb_Result_descriptor = getDescriptor().getMessageTypes().get(4); internal_static_hbase_pb_Result_fieldAccessorTable = new diff --git a/hbase-protocol/src/main/protobuf/Client.proto b/hbase-protocol/src/main/protobuf/Client.proto index 3390de7..101854d 100644 --- a/hbase-protocol/src/main/protobuf/Client.proto +++ b/hbase-protocol/src/main/protobuf/Client.proto @@ -64,8 +64,7 @@ enum Consistency { /** * The protocol buffer version of Get. * Unless existence_only is specified, return all the requested data - * for the row that matches exactly, or the one that immediately - * precedes it if closest_row_before is specified. + * for the row that matches exactly. */ message Get { required bytes row = 1; @@ -82,10 +81,6 @@ message Get { // the existence. 
optional bool existence_only = 10 [default = false]; - // If the row to get doesn't exist, return the - // closest row before. - optional bool closest_row_before = 11 [default = false]; - optional Consistency consistency = 12 [default = STRONG]; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRegionObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRegionObserver.java index f47c9f4..2e6d514 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRegionObserver.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRegionObserver.java @@ -19,14 +19,14 @@ import java.io.IOException; import java.util.List; import java.util.NavigableSet; -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CoprocessorEnvironment; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.hbase.client.Append; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Durability; @@ -54,9 +54,9 @@ import org.apache.hadoop.hbase.regionserver.StoreFile; import org.apache.hadoop.hbase.regionserver.StoreFile.Reader; import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest; import org.apache.hadoop.hbase.regionserver.wal.HLogKey; -import org.apache.hadoop.hbase.wal.WALKey; import org.apache.hadoop.hbase.regionserver.wal.WALEdit; import org.apache.hadoop.hbase.util.Pair; +import org.apache.hadoop.hbase.wal.WALKey; import com.google.common.collect.ImmutableList; @@ -120,7 +120,7 @@ public abstract class BaseRegionObserver implements RegionObserver { @Override public void preSplit(ObserverContext e) throws IOException { } - + @Override public void preSplit(ObserverContext c, byte[] splitRow) throws IOException { @@ -130,22 +130,22 @@ public abstract class BaseRegionObserver implements RegionObserver { public void preSplitBeforePONR(ObserverContext ctx, byte[] splitKey, List metaEntries) throws IOException { } - + @Override public void preSplitAfterPONR( ObserverContext ctx) throws IOException { } - + @Override public void preRollBackSplit(ObserverContext ctx) throws IOException { } - + @Override public void postRollBackSplit( ObserverContext ctx) throws IOException { } - + @Override public void postCompleteSplit( ObserverContext ctx) throws IOException { @@ -219,18 +219,6 @@ public abstract class BaseRegionObserver implements RegionObserver { } @Override - public void preGetClosestRowBefore(final ObserverContext e, - final byte [] row, final byte [] family, final Result result) - throws IOException { - } - - @Override - public void postGetClosestRowBefore(final ObserverContext e, - final byte [] row, final byte [] family, final Result result) - throws IOException { - } - - @Override public void preGetOp(final ObserverContext e, final Get get, final List results) throws IOException { } @@ -253,12 +241,12 @@ public abstract class BaseRegionObserver implements RegionObserver { } @Override - public void prePut(final ObserverContext e, + public void prePut(final ObserverContext e, final Put put, final WALEdit edit, final Durability durability) throws 
IOException { } @Override - public void postPut(final ObserverContext e, + public void postPut(final ObserverContext e, final Put put, final WALEdit edit, final Durability durability) throws IOException { } @@ -278,7 +266,7 @@ public abstract class BaseRegionObserver implements RegionObserver { final Delete delete, final WALEdit edit, final Durability durability) throws IOException { } - + @Override public void preBatchMutate(final ObserverContext c, final MiniBatchOperationInProgress miniBatchOp) throws IOException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java index fd19ede..e87a590 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java @@ -382,7 +382,7 @@ public interface RegionObserver extends Coprocessor { void preSplitBeforePONR(final ObserverContext ctx, byte[] splitKey, List metaEntries) throws IOException; - + /** * This will be called after PONR step as part of split transaction * Calling {@link org.apache.hadoop.hbase.coprocessor.ObserverContext#bypass()} has no @@ -391,9 +391,9 @@ public interface RegionObserver extends Coprocessor { * @throws IOException */ void preSplitAfterPONR(final ObserverContext ctx) throws IOException; - + /** - * This will be called before the roll back of the split region is completed + * This will be called before the roll back of the split region is completed * @param ctx * @throws IOException */ @@ -419,7 +419,7 @@ public interface RegionObserver extends Coprocessor { * Called before the region is reported as closed to the master. * @param c the environment provided by the region server * @param abortRequested true if the region server is aborting - * @throws IOException + * @throws IOException */ void preClose(final ObserverContext c, boolean abortRequested) throws IOException; @@ -433,40 +433,6 @@ public interface RegionObserver extends Coprocessor { boolean abortRequested); /** - * Called before a client makes a GetClosestRowBefore request. - *
- * Call CoprocessorEnvironment#bypass to skip default actions - *
- * Call CoprocessorEnvironment#complete to skip any subsequent chained - * coprocessors - * @param c the environment provided by the region server - * @param row the row - * @param family the family - * @param result The result to return to the client if default processing - * is bypassed. Can be modified. Will not be used if default processing - * is not bypassed. - * @throws IOException if an error occurred on the coprocessor - */ - void preGetClosestRowBefore(final ObserverContext c, - final byte [] row, final byte [] family, final Result result) - throws IOException; - - /** - * Called after a client makes a GetClosestRowBefore request. - *
- * Call CoprocessorEnvironment#complete to skip any subsequent chained - * coprocessors - * @param c the environment provided by the region server - * @param row the row - * @param family the desired family - * @param result the result to return to the client, modify as necessary - * @throws IOException if an error occurred on the coprocessor - */ - void postGetClosestRowBefore(final ObserverContext c, - final byte [] row, final byte [] family, final Result result) - throws IOException; - - /** * Called before the client performs a Get *
* Call CoprocessorEnvironment#bypass to skip default actions @@ -543,7 +509,7 @@ public interface RegionObserver extends Coprocessor { * @param durability Persistence guarantee for this Put * @throws IOException if an error occurred on the coprocessor */ - void prePut(final ObserverContext c, + void prePut(final ObserverContext c, final Put put, final WALEdit edit, final Durability durability) throws IOException; @@ -558,7 +524,7 @@ public interface RegionObserver extends Coprocessor { * @param durability Persistence guarantee for this Put * @throws IOException if an error occurred on the coprocessor */ - void postPut(final ObserverContext c, + void postPut(final ObserverContext c, final Put put, final WALEdit edit, final Durability durability) throws IOException; @@ -575,7 +541,7 @@ public interface RegionObserver extends Coprocessor { * @param durability Persistence guarantee for this Delete * @throws IOException if an error occurred on the coprocessor */ - void preDelete(final ObserverContext c, + void preDelete(final ObserverContext c, final Delete delete, final WALEdit edit, final Durability durability) throws IOException; /** @@ -611,7 +577,7 @@ public interface RegionObserver extends Coprocessor { void postDelete(final ObserverContext c, final Delete delete, final WALEdit edit, final Durability durability) throws IOException; - + /** * This will be called for every batch mutation operation happening at the server. This will be * called after acquiring the locks on the mutating rows and after applying the proper timestamp @@ -658,7 +624,7 @@ public interface RegionObserver extends Coprocessor { * Called after the completion of batch put/delete and will be called even if the batch operation * fails * @param ctx - * @param miniBatchOp + * @param miniBatchOp * @param success true if batch operation is successful otherwise false. * @throws IOException */ @@ -679,7 +645,7 @@ public interface RegionObserver extends Coprocessor { * @param compareOp the comparison operation * @param comparator the comparator * @param put data to put if check succeeds - * @param result + * @param result * @return the return value to return to client if bypassing default * processing * @throws IOException if an error occurred on the coprocessor @@ -693,8 +659,8 @@ public interface RegionObserver extends Coprocessor { /** * Called before checkAndPut but after acquiring rowlock. *
- * Note: Caution to be taken for not doing any long time operation in this hook. - * Row will be locked for longer time. Trying to acquire lock on another row, within this, + * Note: Caution to be taken for not doing any long time operation in this hook. + * Row will be locked for longer time. Trying to acquire lock on another row, within this, * can lead to potential deadlock. *
* Call CoprocessorEnvironment#bypass to skip default actions @@ -708,14 +674,14 @@ public interface RegionObserver extends Coprocessor { * @param compareOp the comparison operation * @param comparator the comparator * @param put data to put if check succeeds - * @param result + * @param result * @return the return value to return to client if bypassing default * processing * @throws IOException if an error occurred on the coprocessor */ boolean preCheckAndPutAfterRowLock(final ObserverContext c, final byte[] row, final byte[] family, final byte[] qualifier, final CompareOp compareOp, - final ByteArrayComparable comparator, final Put put, + final ByteArrayComparable comparator, final Put put, final boolean result) throws IOException; /** @@ -754,7 +720,7 @@ public interface RegionObserver extends Coprocessor { * @param compareOp the comparison operation * @param comparator the comparator * @param delete delete to commit if check succeeds - * @param result + * @param result * @return the value to return to client if bypassing default processing * @throws IOException if an error occurred on the coprocessor */ @@ -767,8 +733,8 @@ public interface RegionObserver extends Coprocessor { /** * Called before checkAndDelete but after acquiring rowock. *
- * Note: Caution to be taken for not doing any long time operation in this hook. - * Row will be locked for longer time. Trying to acquire lock on another row, within this, + * Note: Caution to be taken for not doing any long time operation in this hook. + * Row will be locked for longer time. Trying to acquire lock on another row, within this, * can lead to potential deadlock. *
* Call CoprocessorEnvironment#bypass to skip default actions @@ -782,7 +748,7 @@ public interface RegionObserver extends Coprocessor { * @param compareOp the comparison operation * @param comparator the comparator * @param delete delete to commit if check succeeds - * @param result + * @param result * @return the value to return to client if bypassing default processing * @throws IOException if an error occurred on the coprocessor */ @@ -877,8 +843,8 @@ public interface RegionObserver extends Coprocessor { /** * Called before Append but after acquiring rowlock. *
- * Note: Caution to be taken for not doing any long time operation in this hook. - * Row will be locked for longer time. Trying to acquire lock on another row, within this, + * Note: Caution to be taken for not doing any long time operation in this hook. + * Row will be locked for longer time. Trying to acquire lock on another row, within this, * can lead to potential deadlock. *
* Call CoprocessorEnvironment#bypass to skip default actions @@ -927,14 +893,14 @@ public interface RegionObserver extends Coprocessor { /** * Called before Increment but after acquiring rowlock. *
- * Note: Caution to be taken for not doing any long time operation in this hook. - * Row will be locked for longer time. Trying to acquire lock on another row, within this, + * Note: Caution to be taken for not doing any long time operation in this hook. + * Row will be locked for longer time. Trying to acquire lock on another row, within this, * can lead to potential deadlock. *
* Call CoprocessorEnvironment#bypass to skip default actions *
* Call CoprocessorEnvironment#complete to skip any subsequent chained coprocessors - * + * * @param c * the environment provided by the region server * @param increment @@ -1227,7 +1193,7 @@ public interface RegionObserver extends Coprocessor { * Called before creation of Reader for a store file. * Calling {@link org.apache.hadoop.hbase.coprocessor.ObserverContext#bypass()} has no * effect in this hook. - * + * * @param ctx the environment provided by the region server * @param fs fileystem to read from * @param p path to the file @@ -1246,7 +1212,7 @@ public interface RegionObserver extends Coprocessor { /** * Called after the creation of Reader for a store file. - * + * * @param ctx the environment provided by the region server * @param fs fileystem to read from * @param p path to the file diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java index 161e4b6..cc8c3a8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java @@ -31,15 +31,14 @@ import java.util.concurrent.atomic.AtomicLong; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HBaseConfiguration; -import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; +import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.util.ByteRange; import org.apache.hadoop.hbase.util.Bytes; @@ -385,85 +384,6 @@ public class DefaultMemStore implements MemStore { } /** - * @param state column/delete tracking state - */ - @Override - public void getRowKeyAtOrBefore(final GetClosestRowBeforeTracker state) { - getRowKeyAtOrBefore(cellSet, state); - getRowKeyAtOrBefore(snapshot, state); - } - - /* - * @param set - * @param state Accumulates deletes and candidates. - */ - private void getRowKeyAtOrBefore(final NavigableSet set, - final GetClosestRowBeforeTracker state) { - if (set.isEmpty()) { - return; - } - if (!walkForwardInSingleRow(set, state.getTargetKey(), state)) { - // Found nothing in row. Try backing up. - getRowKeyBefore(set, state); - } - } - - /* - * Walk forward in a row from firstOnRow. Presumption is that - * we have been passed the first possible key on a row. As we walk forward - * we accumulate deletes until we hit a candidate on the row at which point - * we return. - * @param set - * @param firstOnRow First possible key on this row. - * @param state - * @return True if we found a candidate walking this row. - */ - private boolean walkForwardInSingleRow(final SortedSet set, - final Cell firstOnRow, final GetClosestRowBeforeTracker state) { - boolean foundCandidate = false; - SortedSet tail = set.tailSet(firstOnRow); - if (tail.isEmpty()) return foundCandidate; - for (Iterator i = tail.iterator(); i.hasNext();) { - Cell kv = i.next(); - // Did we go beyond the target row? If so break. 
- if (state.isTooFar(kv, firstOnRow)) break; - if (state.isExpired(kv)) { - i.remove(); - continue; - } - // If we added something, this row is a contender. break. - if (state.handle(kv)) { - foundCandidate = true; - break; - } - } - return foundCandidate; - } - - /* - * Walk backwards through the passed set a row at a time until we run out of - * set or until we get a candidate. - * @param set - * @param state - */ - private void getRowKeyBefore(NavigableSet set, - final GetClosestRowBeforeTracker state) { - Cell firstOnRow = state.getTargetKey(); - for (Member p = memberOfPreviousRow(set, state, firstOnRow); - p != null; p = memberOfPreviousRow(p.set, state, firstOnRow)) { - // Make sure we don't fall out of our table. - if (!state.isTargetTable(p.cell)) break; - // Stop looking if we've exited the better candidate range. - if (!state.isBetterCandidate(p.cell)) break; - // Make into firstOnRow - firstOnRow = new KeyValue(p.cell.getRowArray(), p.cell.getRowOffset(), p.cell.getRowLength(), - HConstants.LATEST_TIMESTAMP); - // If we find something, break; - if (walkForwardInSingleRow(p.set, firstOnRow, state)) break; - } - } - - /** * Only used by tests. TODO: Remove * * Given the specs of a column, update it, first by inserting a new record, @@ -622,42 +542,6 @@ public class DefaultMemStore implements MemStore { return addedSize; } - /* - * Immutable data structure to hold member found in set and the set it was - * found in. Include set because it is carrying context. - */ - private static class Member { - final Cell cell; - final NavigableSet set; - Member(final NavigableSet s, final Cell kv) { - this.cell = kv; - this.set = s; - } - } - - /* - * @param set Set to walk back in. Pass a first in row or we'll return - * same row (loop). - * @param state Utility and context. - * @param firstOnRow First item on the row after the one we want to find a - * member in. - * @return Null or member of row previous to firstOnRow - */ - private Member memberOfPreviousRow(NavigableSet set, - final GetClosestRowBeforeTracker state, final Cell firstOnRow) { - NavigableSet head = set.headSet(firstOnRow, false); - if (head.isEmpty()) return null; - for (Iterator i = head.descendingIterator(); i.hasNext();) { - Cell found = i.next(); - if (state.isExpired(found)) { - i.remove(); - continue; - } - return new Member(head, found); - } - return null; - } - /** * @return scanner on memstore and snapshot in this order. */ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/GetClosestRowBeforeTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/GetClosestRowBeforeTracker.java deleted file mode 100644 index 2df4286..0000000 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/GetClosestRowBeforeTracker.java +++ /dev/null @@ -1,242 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.regionserver; - -import java.util.NavigableMap; -import java.util.NavigableSet; -import java.util.TreeMap; -import java.util.TreeSet; - -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.CellComparator; -import org.apache.hadoop.hbase.CellUtil; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.util.Bytes; - -/** - * State and utility processing {@link HRegion#getClosestRowBefore(byte[], byte[])}. - * Like {@link ScanQueryMatcher} and {@link ScanDeleteTracker} but does not - * implement the {@link DeleteTracker} interface since state spans rows (There - * is no update nor reset method). - */ -@InterfaceAudience.Private -class GetClosestRowBeforeTracker { - private final KeyValue targetkey; - // Any cell w/ a ts older than this is expired. - private final long now; - private final long oldestUnexpiredTs; - private Cell candidate = null; - private final CellComparator cellComparator; - // Flag for whether we're doing getclosest on a metaregion. - private final boolean metaregion; - // Offset and length into targetkey demarking table name (if in a metaregion). - private final int rowoffset; - private final int tablenamePlusDelimiterLength; - - // Deletes keyed by row. Comparator compares on row portion of KeyValue only. - private final NavigableMap> deletes; - - /** - * @param c - * @param kv Presume first on row: i.e. empty column, maximum timestamp and - * a type of Type.Maximum - * @param ttl Time to live in ms for this Store - * @param metaregion True if this is hbase:meta or -ROOT- region. - */ - GetClosestRowBeforeTracker(final CellComparator c, final KeyValue kv, - final long ttl, final boolean metaregion) { - super(); - this.metaregion = metaregion; - this.targetkey = kv; - // If we are in a metaregion, then our table name is the prefix on the - // targetkey. - this.rowoffset = kv.getRowOffset(); - int l = -1; - if (metaregion) { - l = Bytes.searchDelimiterIndex(kv.getRowArray(), rowoffset, kv.getRowLength(), - HConstants.DELIMITER) - this.rowoffset; - } - this.tablenamePlusDelimiterLength = metaregion? l + 1: -1; - this.now = System.currentTimeMillis(); - this.oldestUnexpiredTs = now - ttl; - this.cellComparator = c; - this.deletes = new TreeMap>(new CellComparator.RowComparator()); - } - - /* - * Add the specified KeyValue to the list of deletes. - * @param kv - */ - private void addDelete(final Cell kv) { - NavigableSet rowdeletes = this.deletes.get(kv); - if (rowdeletes == null) { - rowdeletes = new TreeSet(this.cellComparator); - this.deletes.put(kv, rowdeletes); - } - rowdeletes.add(kv); - } - - /* - * @param kv Adds candidate if nearer the target than previous candidate. - * @return True if updated candidate. - */ - private boolean addCandidate(final Cell kv) { - if (!isDeleted(kv) && isBetterCandidate(kv)) { - this.candidate = kv; - return true; - } - return false; - } - - boolean isBetterCandidate(final Cell contender) { - return this.candidate == null || - (this.cellComparator.compareRows(this.candidate, contender) < 0 && - this.cellComparator.compareRows(contender, this.targetkey) <= 0); - } - - /* - * Check if specified KeyValue buffer has been deleted by a previously - * seen delete. 
- * @param kv - * @return true is the specified KeyValue is deleted, false if not - */ - private boolean isDeleted(final Cell kv) { - if (this.deletes.isEmpty()) return false; - NavigableSet rowdeletes = this.deletes.get(kv); - if (rowdeletes == null || rowdeletes.isEmpty()) return false; - return isDeleted(kv, rowdeletes); - } - - /** - * Check if the specified KeyValue buffer has been deleted by a previously - * seen delete. - * @param kv - * @param ds - * @return True is the specified KeyValue is deleted, false if not - */ - public boolean isDeleted(final Cell kv, final NavigableSet ds) { - if (deletes == null || deletes.isEmpty()) return false; - for (Cell d: ds) { - long kvts = kv.getTimestamp(); - long dts = d.getTimestamp(); - if (CellUtil.isDeleteFamily(d)) { - if (kvts <= dts) return true; - continue; - } - // Check column - int ret = CellComparator.compareQualifiers(kv, d); - if (ret <= -1) { - // This delete is for an earlier column. - continue; - } else if (ret >= 1) { - // Beyond this kv. - break; - } - // Check Timestamp - if (kvts > dts) return false; - - // Check Type - switch (KeyValue.Type.codeToType(d.getTypeByte())) { - case Delete: return kvts == dts; - case DeleteColumn: return true; - default: continue; - } - } - return false; - } - - /** - * @param cell - * @return true if the cell is expired - */ - public boolean isExpired(final Cell cell) { - return cell.getTimestamp() < this.oldestUnexpiredTs || - HStore.isCellTTLExpired(cell, this.oldestUnexpiredTs, this.now); - } - - /* - * Handle keys whose values hold deletes. - * Add to the set of deletes and then if the candidate keys contain any that - * might match, then check for a match and remove it. Implies candidates - * is made with a Comparator that ignores key type. - * @param kv - * @return True if we removed k from candidates. - */ - boolean handleDeletes(final Cell kv) { - addDelete(kv); - boolean deleted = false; - if (!hasCandidate()) return deleted; - if (isDeleted(this.candidate)) { - this.candidate = null; - deleted = true; - } - return deleted; - } - - /** - * Do right thing with passed key, add to deletes or add to candidates. - * @param kv - * @return True if we added a candidate - */ - boolean handle(final Cell kv) { - if (CellUtil.isDelete(kv)) { - handleDeletes(kv); - return false; - } - return addCandidate(kv); - } - - /** - * @return True if has candidate - */ - public boolean hasCandidate() { - return this.candidate != null; - } - - /** - * @return Best candidate or null. - */ - public Cell getCandidate() { - return this.candidate; - } - - public KeyValue getTargetKey() { - return this.targetkey; - } - - /** - * @param kv Current kv - * @param firstOnRow on row kv. - * @return True if we went too far, past the target key. - */ - boolean isTooFar(final Cell kv, final Cell firstOnRow) { - return this.cellComparator.compareRows(kv, firstOnRow) > 0; - } - - boolean isTargetTable(final Cell kv) { - if (!metaregion) return true; - // Compare start of keys row. Compare including delimiter. Saves having - // to calculate where tablename ends in the candidate kv. 
- return Bytes.compareTo(this.targetkey.getRowArray(), this.rowoffset, - this.tablenamePlusDelimiterLength, - kv.getRowArray(), kv.getRowOffset(), this.tablenamePlusDelimiterLength) == 0; - } -} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java index 2fd7ebd..3b23f2a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java @@ -2423,38 +2423,6 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi ////////////////////////////////////////////////////////////////////////////// @Override - public Result getClosestRowBefore(final byte [] row, final byte [] family) throws IOException { - if (coprocessorHost != null) { - Result result = new Result(); - if (coprocessorHost.preGetClosestRowBefore(row, family, result)) { - return result; - } - } - // look across all the HStores for this region and determine what the - // closest key is across all column families, since the data may be sparse - checkRow(row, "getClosestRowBefore"); - startRegionOperation(Operation.GET); - this.readRequestsCount.increment(); - try { - Store store = getStore(family); - // get the closest key. (HStore.getRowKeyAtOrBefore can return null) - Cell key = store.getRowKeyAtOrBefore(row); - Result result = null; - if (key != null) { - Get get = new Get(CellUtil.cloneRow(key)); - get.addFamily(family); - result = get(get); - } - if (coprocessorHost != null) { - coprocessorHost.postGetClosestRowBefore(row, family, result); - } - return result; - } finally { - closeRegionOperation(Operation.GET); - } - } - - @Override public RegionScanner getScanner(Scan scan) throws IOException { return getScanner(scan, null); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java index 57ca3f1..07d51c0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java @@ -1775,154 +1775,6 @@ public class HStore implements Store { } @Override - public Cell getRowKeyAtOrBefore(final byte[] row) throws IOException { - // If minVersions is set, we will not ignore expired KVs. - // As we're only looking for the latest matches, that should be OK. - // With minVersions > 0 we guarantee that any KV that has any version - // at all (expired or not) has at least one version that will not expire. - // Note that this method used to take a KeyValue as arguments. KeyValue - // can be back-dated, a row key cannot. - long ttlToUse = scanInfo.getMinVersions() > 0 ? Long.MAX_VALUE : this.scanInfo.getTtl(); - - KeyValue kv = new KeyValue(row, HConstants.LATEST_TIMESTAMP); - - GetClosestRowBeforeTracker state = new GetClosestRowBeforeTracker( - this.comparator, kv, ttlToUse, this.getRegionInfo().isMetaRegion()); - this.lock.readLock().lock(); - try { - // First go to the memstore. Pick up deletes and candidates. - this.memstore.getRowKeyAtOrBefore(state); - // Check if match, if we got a candidate on the asked for 'kv' row. - // Process each relevant store file. Run through from newest to oldest. 
- Iterator sfIterator = this.storeEngine.getStoreFileManager() - .getCandidateFilesForRowKeyBefore(state.getTargetKey()); - while (sfIterator.hasNext()) { - StoreFile sf = sfIterator.next(); - sfIterator.remove(); // Remove sf from iterator. - boolean haveNewCandidate = rowAtOrBeforeFromStoreFile(sf, state); - Cell candidate = state.getCandidate(); - // we have an optimization here which stops the search if we find exact match. - if (candidate != null && CellUtil.matchingRow(candidate, row)) { - return candidate; - } - if (haveNewCandidate) { - sfIterator = this.storeEngine.getStoreFileManager().updateCandidateFilesForRowKeyBefore( - sfIterator, state.getTargetKey(), candidate); - } - } - return state.getCandidate(); - } finally { - this.lock.readLock().unlock(); - } - } - - /* - * Check an individual MapFile for the row at or before a given row. - * @param f - * @param state - * @throws IOException - * @return True iff the candidate has been updated in the state. - */ - private boolean rowAtOrBeforeFromStoreFile(final StoreFile f, - final GetClosestRowBeforeTracker state) - throws IOException { - StoreFile.Reader r = f.getReader(); - if (r == null) { - LOG.warn("StoreFile " + f + " has a null Reader"); - return false; - } - if (r.getEntries() == 0) { - LOG.warn("StoreFile " + f + " is a empty store file"); - return false; - } - // TODO: Cache these keys rather than make each time? - Cell firstKV = r.getFirstKey(); - if (firstKV == null) return false; - Cell lastKV = r.getLastKey(); - Cell firstOnRow = state.getTargetKey(); - if (this.comparator.compareRows(lastKV, firstOnRow) < 0) { - // If last key in file is not of the target table, no candidates in this - // file. Return. - if (!state.isTargetTable(lastKV)) return false; - // If the row we're looking for is past the end of file, set search key to - // last key. TODO: Cache last and first key rather than make each time. - firstOnRow = CellUtil.createFirstOnRow(lastKV); - } - // Get a scanner that caches blocks and that uses pread. - HFileScanner scanner = r.getScanner(true, true, false); - // Seek scanner. If can't seek it, return. - if (!seekToScanner(scanner, firstOnRow, firstKV)) return false; - // If we found candidate on firstOnRow, just return. THIS WILL NEVER HAPPEN! - // Unlikely that there'll be an instance of actual first row in table. - if (walkForwardInSingleRow(scanner, firstOnRow, state)) return true; - // If here, need to start backing up. - while (scanner.seekBefore(firstOnRow)) { - Cell kv = scanner.getCell(); - if (!state.isTargetTable(kv)) break; - if (!state.isBetterCandidate(kv)) break; - // Make new first on row. - firstOnRow = CellUtil.createFirstOnRow(kv); - // Seek scanner. If can't seek it, break. - if (!seekToScanner(scanner, firstOnRow, firstKV)) return false; - // If we find something, break; - if (walkForwardInSingleRow(scanner, firstOnRow, state)) return true; - } - return false; - } - - /* - * Seek the file scanner to firstOnRow or first entry in file. - * @param scanner - * @param firstOnRow - * @param firstKV - * @return True if we successfully seeked scanner. 
- * @throws IOException - */ - private boolean seekToScanner(final HFileScanner scanner, - final Cell firstOnRow, - final Cell firstKV) - throws IOException { - Cell kv = firstOnRow; - // If firstOnRow < firstKV, set to firstKV - if (this.comparator.compareRows(firstKV, firstOnRow) == 0) kv = firstKV; - int result = scanner.seekTo(kv); - return result != -1; - } - - /* - * When we come in here, we are probably at the kv just before we break into - * the row that firstOnRow is on. Usually need to increment one time to get - * on to the row we are interested in. - * @param scanner - * @param firstOnRow - * @param state - * @return True we found a candidate. - * @throws IOException - */ - private boolean walkForwardInSingleRow(final HFileScanner scanner, - final Cell firstOnRow, - final GetClosestRowBeforeTracker state) - throws IOException { - boolean foundCandidate = false; - do { - Cell kv = scanner.getCell(); - // If we are not in the row, skip. - if (this.comparator.compareRows(kv, firstOnRow) < 0) continue; - // Did we go beyond the target row? If so break. - if (state.isTooFar(kv, firstOnRow)) break; - if (state.isExpired(kv)) { - continue; - } - // If we added something, this row is a contender. break. - if (state.handle(kv)) { - foundCandidate = true; - break; - } - } while(scanner.next()); - return foundCandidate; - } - - @Override public boolean canSplit() { this.lock.readLock().lock(); try { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStore.java index 364b9c9..d24299d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStore.java @@ -93,13 +93,6 @@ public interface MemStore extends HeapSize { long delete(final Cell deleteCell); /** - * Find the key that matches row exactly, or the one that immediately precedes it. The - * target row key is set in state. - * @param state column/delete tracking state - */ - void getRowKeyAtOrBefore(final GetClosestRowBeforeTracker state); - - /** * Given the specs of a column, update it, first by inserting a new record, * then removing the old one. Since there is only 1 KeyValue involved, the memstoreTS * will be set to 0, thus ensuring that they instantly appear to anyone. 
The underlying diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java index d7be4b4..5f31086 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java @@ -1933,32 +1933,21 @@ public class RSRpcServices implements HBaseRPCErrorHandler, quota = getQuotaManager().checkQuota(region, OperationQuota.OperationType.GET); - if (get.hasClosestRowBefore() && get.getClosestRowBefore()) { - if (get.getColumnCount() != 1) { - throw new DoNotRetryIOException( - "get ClosestRowBefore supports one and only one family now, not " - + get.getColumnCount() + " families"); - } - byte[] row = get.getRow().toByteArray(); - byte[] family = get.getColumn(0).getFamily().toByteArray(); - r = region.getClosestRowBefore(row, family); - } else { - Get clientGet = ProtobufUtil.toGet(get); - if (get.getExistenceOnly() && region.getCoprocessorHost() != null) { - existence = region.getCoprocessorHost().preExists(clientGet); - } - if (existence == null) { - r = region.get(clientGet); - if (get.getExistenceOnly()) { - boolean exists = r.getExists(); - if (region.getCoprocessorHost() != null) { - exists = region.getCoprocessorHost().postExists(clientGet, exists); - } - existence = exists; + Get clientGet = ProtobufUtil.toGet(get); + if (get.getExistenceOnly() && region.getCoprocessorHost() != null) { + existence = region.getCoprocessorHost().preExists(clientGet); + } + if (existence == null) { + r = region.get(clientGet); + if (get.getExistenceOnly()) { + boolean exists = r.getExists(); + if (region.getCoprocessorHost() != null) { + exists = region.getCoprocessorHost().postExists(clientGet, exists); } + existence = exists; } } - if (existence != null){ + if (existence != null) { ClientProtos.Result pbr = ProtobufUtil.toResult(existence, region.getRegionInfo().getReplicaId() != 0); builder.setResult(pbr); @@ -1974,8 +1963,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, throw new ServiceException(ie); } finally { if (regionServer.metricsRegionServer != null) { - regionServer.metricsRegionServer.updateGet( - EnvironmentEdgeManager.currentTime() - before); + regionServer.metricsRegionServer.updateGet(EnvironmentEdgeManager.currentTime() - before); } if (quota != null) { quota.close(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java index 9da99ab..6d87057 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java @@ -381,17 +381,6 @@ public interface Region extends ConfigurationObserver { List get(Get get, boolean withCoprocessor) throws IOException; /** - * Return all the data for the row that matches row exactly, - * or the one that immediately preceeds it, at or immediately before - * ts. - * @param row - * @param family - * @return result of the operation - * @throws IOException - */ - Result getClosestRowBefore(byte[] row, byte[] family) throws IOException; - - /** * Return an iterator that scans over the HRegion, returning the indicated * columns and rows specified by the {@link Scan}. *

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java index 85d4b64..13b8317 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java @@ -783,41 +783,6 @@ public class RegionCoprocessorHost // RegionObserver support /** - * @param row the row key - * @param family the family - * @param result the result set from the region - * @return true if default processing should be bypassed - * @exception IOException Exception - */ - public boolean preGetClosestRowBefore(final byte[] row, final byte[] family, - final Result result) throws IOException { - return execOperation(coprocessors.isEmpty() ? null : new RegionOperation() { - @Override - public void call(RegionObserver oserver, ObserverContext ctx) - throws IOException { - oserver.preGetClosestRowBefore(ctx, row, family, result); - } - }); - } - - /** - * @param row the row key - * @param family the family - * @param result the result set from the region - * @exception IOException Exception - */ - public void postGetClosestRowBefore(final byte[] row, final byte[] family, - final Result result) throws IOException { - execOperation(coprocessors.isEmpty() ? null : new RegionOperation() { - @Override - public void call(RegionObserver oserver, ObserverContext ctx) - throws IOException { - oserver.postGetClosestRowBefore(ctx, row, family, result); - } - }); - } - - /** * @param get the Get request * @return true if default processing should be bypassed * @exception IOException Exception diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java index 46fce67..edc166e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java @@ -143,20 +143,6 @@ public interface Store extends HeapSize, StoreConfigInformation, PropagatingConf */ void rollback(final Cell cell); - /** - * Find the key that matches row exactly, or the one that immediately precedes it. WARNING: - * Only use this method on a table where writes occur with strictly increasing timestamps. This - * method assumes this pattern of writes in order to make it reasonably performant. Also our - * search is dependent on the axiom that deletes are for cells that are in the container that - * follows whether a memstore snapshot or a storefile, not for the current container: i.e. we'll - * see deletes before we come across cells we are to delete. Presumption is that the - * memstore#kvset is processed before memstore#snapshot and so on. - * @param row The row key of the targeted row. - * @return Found Cell or null if none found. 
- * @throws IOException - */ - Cell getRowKeyAtOrBefore(final byte[] row) throws IOException; - FileSystem getFileSystem(); /* diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java index 8bd69a2..275d4da 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java @@ -661,7 +661,6 @@ public class AccessController extends BaseMasterAndRegionObserver } private enum OpType { - GET_CLOSEST_ROW_BEFORE("getClosestRowBefore"), GET("get"), EXISTS("exists"), SCAN("scan"), @@ -1424,28 +1423,6 @@ public class AccessController extends BaseMasterAndRegionObserver return scanner; } - @Override - public void preGetClosestRowBefore(final ObserverContext c, - final byte [] row, final byte [] family, final Result result) - throws IOException { - assert family != null; - RegionCoprocessorEnvironment env = c.getEnvironment(); - Map> families = makeFamilyMap(family, null); - User user = getActiveUser(); - AuthResult authResult = permissionGranted(OpType.GET_CLOSEST_ROW_BEFORE, user, env, families, - Action.READ); - if (!authResult.isAllowed() && cellFeaturesEnabled && !compatibleEarlyTermination) { - authResult.setAllowed(checkCoveringPermission(OpType.GET_CLOSEST_ROW_BEFORE, env, row, - families, HConstants.LATEST_TIMESTAMP, Action.READ)); - authResult.setReason("Covering cell set"); - } - logResult(authResult); - if (authorizationEnabled && !authResult.isAllowed()) { - throw new AccessDeniedException("Insufficient permissions " + - authResult.toContextString()); - } - } - private void internalPreRead(final ObserverContext c, final Query query, OpType opType) throws IOException { Filter filter = query.getFilter(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java index e8b79a8..abc0647 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java @@ -93,6 +93,7 @@ import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.hadoop.hbase.regionserver.HStore; import org.apache.hadoop.hbase.regionserver.InternalScanner; import org.apache.hadoop.hbase.regionserver.Region; +import org.apache.hadoop.hbase.regionserver.RegionScanner; import org.apache.hadoop.hbase.regionserver.RegionServerServices; import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException; import org.apache.hadoop.hbase.regionserver.wal.MetricsWAL; @@ -4143,4 +4144,28 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { } return supportedAlgos.toArray(new Algorithm[supportedAlgos.size()]); } + + public Result getClosestRowBefore(Region r, byte[] row, byte[] family) throws IOException { + Scan scan = new Scan(row); + scan.setSmall(true); + scan.setCaching(1); + scan.setReversed(true); + scan.addFamily(family); + try (RegionScanner scanner = r.getScanner(scan)) { + List cells = new ArrayList(1); + scanner.next(cells); + if (r.getRegionInfo().isMetaRegion() && !isTargetTable(row, cells.get(0))) { + return null; + } + return Result.create(cells); + } + } + + private boolean isTargetTable(final byte[] inRow, Cell c) { + String inputRowString = Bytes.toString(inRow); + int i = 
inputRowString.indexOf(HConstants.DELIMITER); + String outputRowString = Bytes.toString(c.getRowArray(), c.getRowOffset(), c.getRowLength()); + int o = outputRowString.indexOf(HConstants.DELIMITER); + return inputRowString.substring(0, i).equals(outputRowString.substring(0, o)); + } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java index 0dd8bea..dfd1c0d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java @@ -4207,83 +4207,82 @@ public class TestFromClientSide { region.flush(true); Result result; - Get get = null; // Test before first that null is returned - get = new Get(beforeFirstRow); - get.setClosestRowBefore(true); - get.addFamily(HConstants.CATALOG_FAMILY); - result = table.get(get); - assertTrue(result.isEmpty()); + Scan scan = Scan.createGetClosestRowOrBeforeReverseScan(beforeFirstRow); + scan.addFamily(HConstants.CATALOG_FAMILY); + ResultScanner scanner = table.getScanner(scan); + result = scanner.next(); + assertNull(result); // Test at first that first is returned - get = new Get(firstRow); - get.setClosestRowBefore(true); - get.addFamily(HConstants.CATALOG_FAMILY); - result = table.get(get); + scan = Scan.createGetClosestRowOrBeforeReverseScan(firstRow); + scan.addFamily(HConstants.CATALOG_FAMILY); + scanner = table.getScanner(scan); + result = scanner.next(); assertTrue(result.containsColumn(HConstants.CATALOG_FAMILY, null)); assertTrue(Bytes.equals(result.getRow(), firstRow)); assertTrue(Bytes.equals(result.getValue(HConstants.CATALOG_FAMILY, null), one)); // Test in between first and second that first is returned - get = new Get(beforeSecondRow); - get.setClosestRowBefore(true); - get.addFamily(HConstants.CATALOG_FAMILY); - result = table.get(get); + scan = Scan.createGetClosestRowOrBeforeReverseScan(beforeSecondRow); + scan.addFamily(HConstants.CATALOG_FAMILY); + scanner = table.getScanner(scan); + result = scanner.next(); assertTrue(result.containsColumn(HConstants.CATALOG_FAMILY, null)); assertTrue(Bytes.equals(result.getRow(), firstRow)); assertTrue(Bytes.equals(result.getValue(HConstants.CATALOG_FAMILY, null), one)); // Test at second make sure second is returned - get = new Get(secondRow); - get.setClosestRowBefore(true); - get.addFamily(HConstants.CATALOG_FAMILY); - result = table.get(get); + scan = Scan.createGetClosestRowOrBeforeReverseScan(secondRow); + scan.addFamily(HConstants.CATALOG_FAMILY); + scanner = table.getScanner(scan); + result = scanner.next(); assertTrue(result.containsColumn(HConstants.CATALOG_FAMILY, null)); assertTrue(Bytes.equals(result.getRow(), secondRow)); assertTrue(Bytes.equals(result.getValue(HConstants.CATALOG_FAMILY, null), two)); // Test in second and third, make sure second is returned - get = new Get(beforeThirdRow); - get.setClosestRowBefore(true); - get.addFamily(HConstants.CATALOG_FAMILY); - result = table.get(get); + scan = Scan.createGetClosestRowOrBeforeReverseScan(beforeThirdRow); + scan.addFamily(HConstants.CATALOG_FAMILY); + scanner = table.getScanner(scan); + result = scanner.next(); assertTrue(result.containsColumn(HConstants.CATALOG_FAMILY, null)); assertTrue(Bytes.equals(result.getRow(), secondRow)); assertTrue(Bytes.equals(result.getValue(HConstants.CATALOG_FAMILY, null), two)); // Test at third make sure third is returned - get = new Get(thirdRow); - 
get.setClosestRowBefore(true); - get.addFamily(HConstants.CATALOG_FAMILY); - result = table.get(get); + scan = Scan.createGetClosestRowOrBeforeReverseScan(thirdRow); + scan.addFamily(HConstants.CATALOG_FAMILY); + scanner = table.getScanner(scan); + result = scanner.next(); assertTrue(result.containsColumn(HConstants.CATALOG_FAMILY, null)); assertTrue(Bytes.equals(result.getRow(), thirdRow)); assertTrue(Bytes.equals(result.getValue(HConstants.CATALOG_FAMILY, null), three)); // Test in third and forth, make sure third is returned - get = new Get(beforeForthRow); - get.setClosestRowBefore(true); - get.addFamily(HConstants.CATALOG_FAMILY); - result = table.get(get); + scan = Scan.createGetClosestRowOrBeforeReverseScan(beforeForthRow); + scan.addFamily(HConstants.CATALOG_FAMILY); + scanner = table.getScanner(scan); + result = scanner.next(); assertTrue(result.containsColumn(HConstants.CATALOG_FAMILY, null)); assertTrue(Bytes.equals(result.getRow(), thirdRow)); assertTrue(Bytes.equals(result.getValue(HConstants.CATALOG_FAMILY, null), three)); // Test at forth make sure forth is returned - get = new Get(forthRow); - get.setClosestRowBefore(true); - get.addFamily(HConstants.CATALOG_FAMILY); - result = table.get(get); + scan = Scan.createGetClosestRowOrBeforeReverseScan(forthRow); + scan.addFamily(HConstants.CATALOG_FAMILY); + scanner = table.getScanner(scan); + result = scanner.next(); assertTrue(result.containsColumn(HConstants.CATALOG_FAMILY, null)); assertTrue(Bytes.equals(result.getRow(), forthRow)); assertTrue(Bytes.equals(result.getValue(HConstants.CATALOG_FAMILY, null), four)); // Test after forth make sure forth is returned - get = new Get(Bytes.add(forthRow, one)); - get.setClosestRowBefore(true); - get.addFamily(HConstants.CATALOG_FAMILY); - result = table.get(get); + scan = Scan.createGetClosestRowOrBeforeReverseScan(Bytes.add(forthRow, one)); + scan.addFamily(HConstants.CATALOG_FAMILY); + scanner = table.getScanner(scan); + result = scanner.next(); assertTrue(result.containsColumn(HConstants.CATALOG_FAMILY, null)); assertTrue(Bytes.equals(result.getRow(), forthRow)); assertTrue(Bytes.equals(result.getValue(HConstants.CATALOG_FAMILY, null), four)); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideNoCodec.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideNoCodec.java index d92d301..dbb1cd1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideNoCodec.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideNoCodec.java @@ -85,7 +85,6 @@ public class TestFromClientSideNoCodec { // Check getRowOrBefore byte[] f = fs[0]; Get get = new Get(row); - get.setClosestRowBefore(true); get.addFamily(f); r = ht.get(get); assertTrue(r.toString(), r.containsColumn(f, f)); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SimpleRegionObserver.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SimpleRegionObserver.java index 6707354..14b43df 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SimpleRegionObserver.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SimpleRegionObserver.java @@ -102,8 +102,6 @@ public class SimpleRegionObserver extends BaseRegionObserver { final AtomicInteger ctPreDeleted = new AtomicInteger(0); final AtomicInteger ctPrePrepareDeleteTS = new AtomicInteger(0); final AtomicInteger ctPostDeleted = new AtomicInteger(0); - final AtomicInteger 
ctPreGetClosestRowBefore = new AtomicInteger(0); - final AtomicInteger ctPostGetClosestRowBefore = new AtomicInteger(0); final AtomicInteger ctPreIncrement = new AtomicInteger(0); final AtomicInteger ctPreIncrementAfterRowLock = new AtomicInteger(0); final AtomicInteger ctPreAppend = new AtomicInteger(0); @@ -518,32 +516,6 @@ public class SimpleRegionObserver extends BaseRegionObserver { } @Override - public void preGetClosestRowBefore(final ObserverContext c, - final byte[] row, final byte[] family, final Result result) - throws IOException { - RegionCoprocessorEnvironment e = c.getEnvironment(); - assertNotNull(e); - assertNotNull(e.getRegion()); - assertNotNull(row); - assertNotNull(result); - if (ctBeforeDelete.get() > 0) { - ctPreGetClosestRowBefore.incrementAndGet(); - } - } - - @Override - public void postGetClosestRowBefore(final ObserverContext c, - final byte[] row, final byte[] family, final Result result) - throws IOException { - RegionCoprocessorEnvironment e = c.getEnvironment(); - assertNotNull(e); - assertNotNull(e.getRegion()); - assertNotNull(row); - assertNotNull(result); - ctPostGetClosestRowBefore.incrementAndGet(); - } - - @Override public Result preIncrement(final ObserverContext c, final Increment increment) throws IOException { ctPreIncrement.incrementAndGet(); @@ -940,14 +912,6 @@ public class SimpleRegionObserver extends BaseRegionObserver { return ctPostDeleted.get(); } - public int getCtPreGetClosestRowBefore() { - return ctPreGetClosestRowBefore.get(); - } - - public int getCtPostGetClosestRowBefore() { - return ctPostGetClosestRowBefore.get(); - } - public int getCtPreIncrement() { return ctPreIncrement.get(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java index 418e2fc..470a453 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java @@ -33,7 +33,6 @@ import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; -import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Delete; @@ -66,6 +65,8 @@ public class TestGetClosestAtOrBefore extends HBaseTestCase { private static final byte[] T35 = Bytes.toBytes("035"); private static final byte[] T40 = Bytes.toBytes("040"); + private static HBaseTestingUtility UTIL = new HBaseTestingUtility(); + @Test @@ -160,7 +161,7 @@ public class TestGetClosestAtOrBefore extends HBaseTestCase { tableb, tofindBytes, HConstants.NINES, false); LOG.info("find=" + new String(metaKey)); - Result r = mr.getClosestRowBefore(metaKey, HConstants.CATALOG_FAMILY); + Result r = UTIL.getClosestRowBefore(mr, metaKey, HConstants.CATALOG_FAMILY); if (answer == -1) { assertNull(r); return null; @@ -206,38 +207,38 @@ public class TestGetClosestAtOrBefore extends HBaseTestCase { p.add(c0, c0, T20); region.put(p); - Result r = region.getClosestRowBefore(T20, c0); + Result r = UTIL.getClosestRowBefore(region, T20, c0); assertTrue(Bytes.equals(T20, r.getRow())); Delete d = new Delete(T20); d.deleteColumn(c0, c0); region.delete(d); - r = region.getClosestRowBefore(T20, c0); + r = UTIL.getClosestRowBefore(region, 
T20, c0); assertTrue(Bytes.equals(T10, r.getRow())); p = new Put(T30); p.add(c0, c0, T30); region.put(p); - r = region.getClosestRowBefore(T30, c0); + r = UTIL.getClosestRowBefore(region, T30, c0); assertTrue(Bytes.equals(T30, r.getRow())); d = new Delete(T30); d.deleteColumn(c0, c0); region.delete(d); - r = region.getClosestRowBefore(T30, c0); + r = UTIL.getClosestRowBefore(region, T30, c0); assertTrue(Bytes.equals(T10, r.getRow())); - r = region.getClosestRowBefore(T31, c0); + r = UTIL.getClosestRowBefore(region, T31, c0); assertTrue(Bytes.equals(T10, r.getRow())); region.flush(true); // try finding "010" after flush - r = region.getClosestRowBefore(T30, c0); + r = UTIL.getClosestRowBefore(region, T30, c0); assertTrue(Bytes.equals(T10, r.getRow())); - r = region.getClosestRowBefore(T31, c0); + r = UTIL.getClosestRowBefore(region, T31, c0); assertTrue(Bytes.equals(T10, r.getRow())); // Put into a different column family. Should make it so I still get t10 @@ -245,16 +246,16 @@ public class TestGetClosestAtOrBefore extends HBaseTestCase { p.add(c1, c1, T20); region.put(p); - r = region.getClosestRowBefore(T30, c0); + r = UTIL.getClosestRowBefore(region, T30, c0); assertTrue(Bytes.equals(T10, r.getRow())); - r = region.getClosestRowBefore(T31, c0); + r = UTIL.getClosestRowBefore(region, T31, c0); assertTrue(Bytes.equals(T10, r.getRow())); region.flush(true); - r = region.getClosestRowBefore(T30, c0); + r = UTIL.getClosestRowBefore(region, T30, c0); assertTrue(Bytes.equals(T10, r.getRow())); - r = region.getClosestRowBefore(T31, c0); + r = UTIL.getClosestRowBefore(region, T31, c0); assertTrue(Bytes.equals(T10, r.getRow())); // Now try combo of memcache and mapfiles. Delete the t20 COLUMS[1] @@ -262,14 +263,14 @@ public class TestGetClosestAtOrBefore extends HBaseTestCase { d = new Delete(T20); d.deleteColumn(c1, c1); region.delete(d); - r = region.getClosestRowBefore(T30, c0); + r = UTIL.getClosestRowBefore(region, T30, c0); assertTrue(Bytes.equals(T10, r.getRow())); // Ask for a value off the end of the file. Should return t10. - r = region.getClosestRowBefore(T31, c0); + r = UTIL.getClosestRowBefore(region, T31, c0); assertTrue(Bytes.equals(T10, r.getRow())); region.flush(true); - r = region.getClosestRowBefore(T31, c0); + r = UTIL.getClosestRowBefore(region, T31, c0); assertTrue(Bytes.equals(T10, r.getRow())); // Ok. 
Let the candidate come out of hfile but have delete of @@ -279,7 +280,7 @@ public class TestGetClosestAtOrBefore extends HBaseTestCase { region.put(p); d = new Delete(T10); d.deleteColumn(c1, c1); - r = region.getClosestRowBefore(T12, c0); + r = UTIL.getClosestRowBefore(region, T12, c0); assertTrue(Bytes.equals(T11, r.getRow())); } finally { if (region != null) { @@ -316,13 +317,13 @@ public class TestGetClosestAtOrBefore extends HBaseTestCase { region.put(p); // try finding "035" - Result r = region.getClosestRowBefore(T35, c0); + Result r = UTIL.getClosestRowBefore(region, T35, c0); assertTrue(Bytes.equals(T30, r.getRow())); region.flush(true); // try finding "035" - r = region.getClosestRowBefore(T35, c0); + r = UTIL.getClosestRowBefore(region, T35, c0); assertTrue(Bytes.equals(T30, r.getRow())); p = new Put(T20); @@ -330,13 +331,13 @@ public class TestGetClosestAtOrBefore extends HBaseTestCase { region.put(p); // try finding "035" - r = region.getClosestRowBefore(T35, c0); + r = UTIL.getClosestRowBefore(region, T35, c0); assertTrue(Bytes.equals(T30, r.getRow())); region.flush(true); // try finding "035" - r = region.getClosestRowBefore(T35, c0); + r = UTIL.getClosestRowBefore(region, T35, c0); assertTrue(Bytes.equals(T30, r.getRow())); } finally { if (region != null) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMinVersions.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMinVersions.java index 7f8a20b..cd84eac 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMinVersions.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMinVersions.java @@ -89,20 +89,20 @@ public class TestMinVersions { // now make sure that getClosestBefore(...) get can // rows that would be expired without minVersion. 
// also make sure it gets the latest version - Result r = region.getClosestRowBefore(T1, c0); + Result r = hbu.getClosestRowBefore(region, T1, c0); checkResult(r, c0, T4); - r = region.getClosestRowBefore(T2, c0); + r = hbu.getClosestRowBefore(region, T2, c0); checkResult(r, c0, T4); // now flush/compact region.flush(true); region.compact(true); - r = region.getClosestRowBefore(T1, c0); + r = hbu.getClosestRowBefore(region, T1, c0); checkResult(r, c0, T4); - r = region.getClosestRowBefore(T2, c0); + r = hbu.getClosestRowBefore(region, T2, c0); checkResult(r, c0, T4); } finally { HBaseTestingUtility.closeRegionAndWAL(region); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestWithDisabledAuthorization.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestWithDisabledAuthorization.java index bd8cf64..8fd3f67 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestWithDisabledAuthorization.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestWithDisabledAuthorization.java @@ -879,16 +879,6 @@ public class TestWithDisabledAuthorization extends SecureTestUtil { } }, SUPERUSER, USER_ADMIN, USER_RW, USER_RO, USER_OWNER, USER_CREATE, USER_QUAL, USER_NONE); - // preGetClosestRowBefore - verifyAllowed(new AccessTestAction() { - @Override - public Object run() throws Exception { - ACCESS_CONTROLLER.preGetClosestRowBefore(ObserverContext.createAndPrepare(RCP_ENV, null), - TEST_ROW, TEST_FAMILY, new Result()); - return null; - } - }, SUPERUSER, USER_ADMIN, USER_RW, USER_RO, USER_OWNER, USER_CREATE, USER_QUAL, USER_NONE); - // preGetOp verifyAllowed(new AccessTestAction() { @Override -- 1.9.2.msysgit.0
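
Note on the replacement pattern: with Region#getClosestRowBefore and Store#getRowKeyAtOrBefore gone, the "row at or before" lookup is expressed as a reversed, small scan, which is what the new HBaseTestingUtility#getClosestRowBefore helper in this patch does against a Region. The following is a minimal client-side sketch of the same idea over the public Table/Scan API; the class name, connection handling, and null/empty handling are illustrative assumptions, not part of this patch:

import java.io.IOException;

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;

public class ClosestRowBeforeExample {
  /**
   * Returns the row matching 'row' exactly, or the one immediately preceding it,
   * restricted to 'family'; returns null if no such row exists.
   * Illustrative sketch only; mirrors the reverse-scan helper added to
   * HBaseTestingUtility in this patch, but runs client-side through a Table.
   */
  public static Result getClosestRowBefore(Connection connection, TableName tableName,
      byte[] row, byte[] family) throws IOException {
    Scan scan = new Scan(row);   // start at the requested row (inclusive)...
    scan.setReversed(true);      // ...and walk backwards from it
    scan.setSmall(true);         // single-RPC "small" scan
    scan.setCaching(1);          // only the first result is needed
    scan.addFamily(family);
    try (Table table = connection.getTable(tableName);
        ResultScanner scanner = table.getScanner(scan)) {
      Result result = scanner.next();
      return (result == null || result.isEmpty()) ? null : result;
    }
  }
}

Callers that previously built a Get with setClosestRowBefore(true) and a single family get the same answer from this scan; for hbase:meta lookups the caller must additionally verify that the returned row belongs to the requested table, as the isTargetTable check in the HBaseTestingUtility helper does.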