Index: src/test/java/org/apache/hadoop/hbase/io/TestHBASE5997.java
===================================================================
--- src/test/java/org/apache/hadoop/hbase/io/TestHBASE5997.java (revision 0)
+++ src/test/java/org/apache/hadoop/hbase/io/TestHBASE5997.java (working copy)
@@ -0,0 +1,102 @@
+package org.apache.hadoop.hbase.io;
+
+import static org.junit.Assert.assertEquals;
+
+import java.io.IOException;
+import java.util.Map;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HServerAddress;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+/**
+ * Regression test for HBASE-5997: a getRowOrBefore (seekBefore) issued with
+ * the split key of a freshly split table must succeed while the daughter
+ * regions still serve data through half-store-file readers over the parent's
+ * HFiles. {@link TestRegionObserver} vetoes every compaction so those half
+ * readers stay in use for the duration of the test.
+ */
+public class TestHBASE5997 {
+  private static final byte[] TABLE = "t1".getBytes();
+  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+  private static final byte[] FAMILY = "cf1".getBytes();
+  private static final byte[] QUALIFIER = "q1".getBytes();
+  private static final byte[] VALUE = "value".getBytes();
+
+  @BeforeClass
+  public static void setUpBeforeClass() throws Exception {
+    Configuration conf = TEST_UTIL.getConfiguration();
+    // Install the observer that cancels compactions, keeping the parent's
+    // HFiles referenced by the daughters' HalfStoreFile readers.
+    conf.setStrings(CoprocessorHost.USER_REGION_COPROCESSOR_CONF_KEY,
+        TestRegionObserver.class.getName());
+    conf.setInt("hbase.client.retries.number", 1);
+    // A single region server is enough: the test only needs one region split.
+    TEST_UTIL.startMiniCluster(1);
+  }
+
+  @AfterClass
+  public static void tearDownAfterClass() throws Exception {
+    TEST_UTIL.shutdownMiniCluster();
+  }
+
+  @Test
+  public void testHalfScannerSeekBefore() throws Exception {
+    final HTable table = TEST_UTIL.createTable(TABLE, FAMILY);
+    addPut("r1", table);
+    addPut("r2", table);
+    addPut("r4", table);
+    addPut("r45", table);
+
+    final AtomicInteger count = new AtomicInteger(0);
+    // Poll until the split becomes visible to the client (at most ~20s).
+    Thread t = new Thread("CheckForSplit") {
+      public void run() {
+        for (int i = 0; i < 20; i++) {
+          try {
+            sleep(1000);
+          } catch (InterruptedException e) {
+            continue;
+          }
+          Map<HRegionInfo, HServerAddress> regions = null;
+          try {
+            regions = table.getRegionsInfo();
+          } catch (IOException e) {
+            e.printStackTrace();
+          }
+          if (regions == null) continue;
+          count.set(regions.size());
+          if (count.get() >= 2) break;
+        }
+      }
+    };
+    t.start();
+    // Split the table into regions [,r3) and [r3,)
+    TEST_UTIL.getHBaseAdmin().split(TABLE, "r3".getBytes());
+    t.join();
+
+    table.clearRegionCache();
+
+    // Do not swallow an IOException here: the method declares throws
+    // Exception, so let a lookup failure fail the test instead of NPE-ing
+    // on a null map below.
+    Map<HRegionInfo, HServerAddress> regions = table.getRegionsInfo();
+    assertEquals(2, regions.size());
+
+    // Now the region is split. Do seekBefore with the split key.
+    table.getRowOrBefore("r3".getBytes(), FAMILY);
+  }
+
+  /** Puts a single cell (FAMILY:QUALIFIER=VALUE) at the given row key. */
+  private void addPut(String rowkey, HTable table) throws IOException {
+    Put put = new Put(rowkey.getBytes());
+    put.add(FAMILY, QUALIFIER, VALUE);
+    table.put(put);
+  }
+}
Index: src/test/java/org/apache/hadoop/hbase/io/TestRegionObserver.java
===================================================================
--- src/test/java/org/apache/hadoop/hbase/io/TestRegionObserver.java (revision 0)
+++ src/test/java/org/apache/hadoop/hbase/io/TestRegionObserver.java (working copy)
@@ -0,0 +1,26 @@
+package org.apache.hadoop.hbase.io;
+
+import java.util.List;
+
+import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
+import org.apache.hadoop.hbase.coprocessor.ObserverContext;
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
+import org.apache.hadoop.hbase.regionserver.Store;
+import org.apache.hadoop.hbase.regionserver.StoreFile;
+
+/**
+ * Coprocessor used by TestHBASE5997: vetoes every compaction so that the
+ * compaction following a split cannot rewrite the parent's HFile into two
+ * daughter files — the HalfStoreFile readers must remain present.
+ */
+public class TestRegionObserver extends BaseRegionObserver {
+
+  @Override
+  public void preCompactSelection(ObserverContext<RegionCoprocessorEnvironment> c, Store store,
+      List<StoreFile> candidates) {
+    // Empty the candidate list and bypass so no compaction ever runs.
+    candidates.clear();
+    c.bypass();
+  }
+}