From 21b6972ede3366f88f8d279685ada09f2fe63a75 Mon Sep 17 00:00:00 2001
From: Phil Yang
Date: Wed, 30 Mar 2016 20:33:46 +0800
Subject: [PATCH] HBASE-15485 Filter.reset() should not be called between batches

---
 .../apache/hadoop/hbase/regionserver/HRegion.java |   2 +-
 .../hadoop/hbase/regionserver/ScannerContext.java |   9 ++
 .../hbase/filter/TestFilterFromClientSide.java    | 128 +++++++++++++++++++++
 3 files changed, 138 insertions(+), 1 deletion(-)
 create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterFromClientSide.java

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 4da0f13..e78e5da 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -5652,7 +5652,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
         // partial Result means that we should not reset the filters; filters
         // should only be reset in
         // between rows
-        if (!scannerContext.partialResultFormed()) resetFilters();
+        if (!scannerContext.betweenCellsResultFormed()) resetFilters();
 
         if (isFilterDoneInternal()) {
           moreValues = false;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContext.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContext.java
index 6674443..d616f65 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContext.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContext.java
@@ -230,6 +230,15 @@ public class ScannerContext {
   }
 
   /**
+   * @return true when a mid-row result is formed.
+   */
+  boolean betweenCellsResultFormed() {
+    return scannerState == NextState.SIZE_LIMIT_REACHED_MID_ROW
+        || scannerState == NextState.TIME_LIMIT_REACHED_MID_ROW
+        || scannerState == NextState.BATCH_LIMIT_REACHED;
+  }
+
+  /**
    * @param checkerScope
    * @return true if the batch limit can be enforced in the checker's scope
    */
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterFromClientSide.java
new file mode 100644
index 0000000..595bb01
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterFromClientSide.java
@@ -0,0 +1,128 @@
+package org.apache.hadoop.hbase.filter;
+
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HTestConst;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * Test the behavior of server-side filters from the client side.
+ */
+@Category(MediumTests.class)
+public class TestFilterFromClientSide {
+
+  private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+  private final static int MINICLUSTER_SIZE = 5;
+  private static Table TABLE = null;
+
+  /**
+   * Table configuration
+   */
+  private static TableName TABLE_NAME = TableName.valueOf("testTable");
+
+  private static int NUM_ROWS = 5;
+  private static byte[] ROW = Bytes.toBytes("testRow");
+  private static byte[][] ROWS = HTestConst.makeNAscii(ROW, NUM_ROWS);
+
+  // Should keep this value below 10 to keep generation of expected kv's simple. If above 10 then
+  // table/row/cf1/... will be followed by table/row/cf10/... instead of table/row/cf2/... which
+  // breaks the simple generation of expected kv's
+  private static int NUM_FAMILIES = 10;
+  private static byte[] FAMILY = Bytes.toBytes("testFamily");
+  private static byte[][] FAMILIES = HTestConst.makeNAscii(FAMILY, NUM_FAMILIES);
+
+  private static int NUM_QUALIFIERS = 10;
+  private static byte[] QUALIFIER = Bytes.toBytes("testQualifier");
+  private static byte[][] QUALIFIERS = HTestConst.makeNAscii(QUALIFIER, NUM_QUALIFIERS);
+
+  private static int VALUE_SIZE = 1024;
+  private static byte[] VALUE = Bytes.createMaxByteArray(VALUE_SIZE);
+
+  private static int NUM_COLS = NUM_FAMILIES * NUM_QUALIFIERS;
+
+  @BeforeClass
+  public static void setUpBeforeClass() throws Exception {
+    TEST_UTIL.startMiniCluster(MINICLUSTER_SIZE);
+    TEST_UTIL.getAdmin().setBalancerRunning(false, true);
+    TABLE = createTestTable(TABLE_NAME, ROWS, FAMILIES, QUALIFIERS, VALUE);
+  }
+
+  private static Table createTestTable(TableName name, byte[][] rows, byte[][] families,
+      byte[][] qualifiers, byte[] cellValue) throws IOException {
+    Table ht = TEST_UTIL.createTable(name, families);
+    List<Put> puts = createPuts(rows, families, qualifiers, cellValue);
+    ht.put(puts);
+
+    return ht;
+  }
+  /**
+   * Make puts to put the input value into each combination of row, family, and qualifier
+   * @param rows
+   * @param families
+   * @param qualifiers
+   * @param value
+   * @return
+   * @throws IOException
+   */
+  private static ArrayList<Put> createPuts(byte[][] rows, byte[][] families, byte[][] qualifiers,
+      byte[] value) throws IOException {
+    Put put;
+    ArrayList<Put> puts = new ArrayList<>();
+
+    for (byte[] row1 : rows) {
+      put = new Put(row1);
+      for (byte[] family : families) {
+        for (int qual = 0; qual < qualifiers.length; qual++) {
+          KeyValue kv = new KeyValue(row1, family, qualifiers[qual], qual, value);
+          put.add(kv);
+        }
+      }
+      puts.add(put);
+    }
+
+    return puts;
+  }
+
+  @AfterClass
+  public static void tearDownAfterClass() throws Exception {
+    TEST_UTIL.shutdownMiniCluster();
+  }
+
+  @Test
+  public void testFirstKeyOnlyFilterAndBatch() throws IOException {
+    Scan scan = new Scan();
+    scan.setFilter(new FirstKeyOnlyFilter());
+    scan.setBatch(1);
+    ResultScanner scanner = TABLE.getScanner(scan);
+    for (int i = 0; i < NUM_ROWS; i++) {
+      Result result = scanner.next();
+      assertEquals(1, result.size());
+      Cell cell = result.rawCells()[0];
+      assertArrayEquals(ROWS[i],
+          Bytes.copy(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength()));
+    }
+    assertNull(scanner.next());
+    scanner.close();
+  }
+
+}
-- 
2.6.4 (Apple Git-63)
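
Not part of the patch: the sketch below is a minimal client-side illustration of the class of filter the change protects, assuming a running cluster and an existing many-column table named "testTable" (the table name and class name are made up for this example). ColumnPaginationFilter keeps a per-row counter that Filter.reset() clears; with a batch size smaller than the page size, every batch return ends in BATCH_LIMIT_REACHED, which the old partialResultFormed() check did not treat as mid-row, so the counter could be reset in the middle of a row.

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.filter.ColumnPaginationFilter;
import org.apache.hadoop.hbase.util.Bytes;

import java.util.HashMap;
import java.util.Map;

public class BatchedStatefulFilterSketch {
  public static void main(String[] args) throws Exception {
    try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
        Table table = connection.getTable(TableName.valueOf("testTable"))) {
      Scan scan = new Scan();
      // Ask for at most 3 columns per row; the filter counts included cells and
      // Filter.reset() clears that counter between rows.
      scan.setFilter(new ColumnPaginationFilter(3, 0));
      // A batch smaller than the page size forces mid-row returns
      // (BATCH_LIMIT_REACHED). Before this patch the server also reset the filter
      // at each such return, so the per-row counter restarted and far more than
      // 3 cells per row could come back; with betweenCellsResultFormed() the
      // filter state survives batch boundaries.
      scan.setBatch(1);

      Map<String, Integer> cellsPerRow = new HashMap<>();
      try (ResultScanner scanner = table.getScanner(scan)) {
        for (Result partial : scanner) {          // one cell per Result due to setBatch(1)
          for (Cell cell : partial.rawCells()) {
            String row = Bytes.toString(CellUtil.cloneRow(cell));
            cellsPerRow.merge(row, 1, Integer::sum);
          }
        }
      }
      // Expected with the fix: every row contributes at most 3 cells in total.
      cellsPerRow.forEach((row, count) -> System.out.println(row + " -> " + count));
    }
  }
}

The FirstKeyOnlyFilter test added by the patch exercises the same code path with the simplest stateful filter; the pagination variant above just makes the lost state visible as a per-row cell count.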