Index: hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScanner.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScanner.java	(revision 1441701)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScanner.java	(working copy)
@@ -547,14 +547,14 @@
       List<KeyValueScanner> scanners = scanFixture(kvs);
       Scan scan = new Scan();
       scan.setMaxVersions(2);
-      HStore.ScanInfo scanInfo = new HStore.ScanInfo(Bytes.toBytes("cf"),
+      HStore.ScanInfo newScanInfo = new HStore.ScanInfo(Bytes.toBytes("cf"),
           0 /* minVersions */,
           2 /* maxVersions */, 500 /* ttl */,
           false /* keepDeletedCells */,
           200, /* timeToPurgeDeletes */
           KeyValue.COMPARATOR);
       StoreScanner scanner =
-        new StoreScanner(scan, scanInfo,
+        new StoreScanner(scan, newScanInfo,
             ScanType.MAJOR_COMPACT, null, scanners,
             HConstants.OLDEST_TIMESTAMP);
       List<KeyValue> results = new ArrayList<KeyValue>();
@@ -568,6 +568,7 @@
       assertEquals(kvs[14], results.get(5));
       assertEquals(kvs[15], results.get(6));
       assertEquals(7, results.size());
+      scanner.close();
     }finally{
       EnvironmentEdgeManagerTestHelper.reset();
     }
Index: hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestCoprocessorScanPolicy.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestCoprocessorScanPolicy.java	(revision 1441701)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestCoprocessorScanPolicy.java	(working copy)
@@ -19,7 +19,10 @@
 // this is deliberately not in the o.a.h.h.regionserver package
 // in order to make sure all required classes/method are available
+import static org.junit.Assert.assertEquals;
+
 import java.io.IOException;
+import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
@@ -44,21 +47,22 @@
 import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
 import org.apache.hadoop.hbase.coprocessor.ObserverContext;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
+import org.apache.hadoop.hbase.regionserver.HStore;
 import org.apache.hadoop.hbase.regionserver.InternalScanner;
 import org.apache.hadoop.hbase.regionserver.KeyValueScanner;
 import org.apache.hadoop.hbase.regionserver.ScanType;
-import org.apache.hadoop.hbase.regionserver.HStore;
 import org.apache.hadoop.hbase.regionserver.StoreScanner;
 import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
-import org.apache.hadoop.hbase.util.Bytes;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameters;

-import static org.junit.Assert.*;
-
 @Category(MediumTests.class)
+@RunWith(Parameterized.class)
 public class TestCoprocessorScanPolicy {
   final Log LOG = LogFactory.getLog(getClass());
   protected final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
@@ -66,7 +70,6 @@
   private static final byte[] Q = Bytes.toBytes("qual");
   private static final byte[] R = Bytes.toBytes("row");

-
   @BeforeClass
   public static void setUpBeforeClass() throws Exception {
     Configuration conf = TEST_UTIL.getConfiguration();
@@ -80,9 +83,22 @@
     TEST_UTIL.shutdownMiniCluster();
   }

+  @Parameters
+  public static Collection<Object[]> parameters() {
+    return HBaseTestingUtility.BOOLEAN_PARAMETERIZED;
+  }
+
+  public TestCoprocessorScanPolicy(boolean parallelSeekEnable) {
+    TEST_UTIL.getMiniHBaseCluster().getConf()
+        .setBoolean(StoreScanner.STORESCANNER_PARALLEL_SEEK_ENABLE, parallelSeekEnable);
+  }
+
   @Test
   public void testBaseCases() throws Exception {
     byte[] tableName = Bytes.toBytes("baseCases");
+    if (TEST_UTIL.getHBaseAdmin().tableExists(tableName)) {
+      TEST_UTIL.deleteTable(tableName);
+    }
     HTable t = TEST_UTIL.createTable(tableName, F, 1);
     // set the version override to 2
     Put p = new Put(R);
@@ -129,6 +145,9 @@
   @Test
   public void testTTL() throws Exception {
     byte[] tableName = Bytes.toBytes("testTTL");
+    if (TEST_UTIL.getHBaseAdmin().tableExists(tableName)) {
+      TEST_UTIL.deleteTable(tableName);
+    }
     HTableDescriptor desc = new HTableDescriptor(tableName);
     HColumnDescriptor hcd = new HColumnDescriptor(F)
         .setMaxVersions(10)
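Note on the test change above: @RunWith(Parameterized.class) re-runs the whole test class once per entry returned by the @Parameters method, passing each entry to the constructor. HBaseTestingUtility.BOOLEAN_PARAMETERIZED supplies {false} and {true}, so every test in TestCoprocessorScanPolicy now runs with parallel seeking both disabled and enabled, which is also why each test first drops any table left over from the previous parameter run. A minimal, self-contained sketch of the mechanism (class and test names are illustrative, not part of the patch):

    import static org.junit.Assert.assertTrue;

    import java.util.Arrays;
    import java.util.Collection;

    import org.junit.Test;
    import org.junit.runner.RunWith;
    import org.junit.runners.Parameterized;
    import org.junit.runners.Parameterized.Parameters;

    // Runs every @Test twice: once with flag=false, once with flag=true.
    @RunWith(Parameterized.class)
    public class BooleanFlagTest {
      private final boolean flag;

      // Each Object[] returned by parameters() is passed to this constructor.
      public BooleanFlagTest(boolean flag) {
        this.flag = flag;
      }

      @Parameters
      public static Collection<Object[]> parameters() {
        // Same shape as HBaseTestingUtility.BOOLEAN_PARAMETERIZED.
        return Arrays.asList(new Object[][] { { false }, { true } });
      }

      @Test
      public void runsUnderBothSettings() {
        assertTrue(flag || !flag); // passes under either parameter
      }
    }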
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java	(revision 1441701)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java	(working copy)
@@ -19,10 +19,17 @@
 package org.apache.hadoop.hbase.regionserver;

 import java.io.IOException;
+import java.io.InterruptedIOException;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.NavigableSet;
+import java.util.concurrent.Callable;
+import java.util.concurrent.CancellationException;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Future;

 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -59,6 +66,11 @@
   protected final boolean isGet;
   protected final boolean explicitColumnQuery;
   protected final boolean useRowColBloom;
+  /**
+   * A flag that enables StoreFileScanner parallel-seeking
+   */
+  protected boolean isParallelSeekEnabled = false;
+  protected ExecutorService seekExecutor;
   protected final Scan scan;
   protected final NavigableSet<byte[]> columns;
   protected final long oldestUnexpiredTS;
@@ -66,6 +78,8 @@

   /** We don't ever expect to change this, the constant is just for clarity. */
   static final boolean LAZY_SEEK_ENABLED_BY_DEFAULT = true;
+  public static final String STORESCANNER_PARALLEL_SEEK_ENABLE =
+      "hbase.storescanner.parallel.seek.enable";

   /** Used during unit testing to ensure that lazy seek does save seek ops */
   protected static boolean lazySeekEnabledGlobally =
@@ -92,6 +106,19 @@
     // for multi-row (non-"get") scans because this is not done in
     // StoreFile.passesBloomFilter(Scan, SortedSet).
     useRowColBloom = numCol > 1 || (!isGet && numCol == 1);
+    // Parallel-seeking is enabled only if:
+    // 1) the config value is *true*, and
+    // 2) the store has more than one store file
+    if (store != null && store.getHRegion() != null
+        && store.getStorefilesCount() > 1) {
+      RegionServerServices rsService = store.getHRegion().getRegionServerServices();
+      if (rsService == null) return;
+      boolean parallelSeekConfFlag = rsService.getConfiguration().getBoolean(
+          STORESCANNER_PARALLEL_SEEK_ENABLE, false);
+      if (!parallelSeekConfFlag || !(rsService instanceof HRegionServer)) return;
+      isParallelSeekEnabled = true;
+      seekExecutor = ((HRegionServer) rsService).getParallelSFSeekExecutor();
+    }
   }

 /**
@@ -127,8 +154,12 @@
         scanner.requestSeek(matcher.getStartKey(), false, true);
       }
     } else {
-      for (KeyValueScanner scanner : scanners) {
-        scanner.seek(matcher.getStartKey());
+      if (!isParallelSeekEnabled) {
+        for (KeyValueScanner scanner : scanners) {
+          scanner.seek(matcher.getStartKey());
+        }
+      } else {
+        parallelSeek(scanners, matcher.getStartKey());
       }
     }

@@ -166,8 +197,12 @@
     scanners = selectScannersFrom(scanners);

     // Seek all scanners to the initial key
-    for(KeyValueScanner scanner : scanners) {
-      scanner.seek(matcher.getStartKey());
+    if (!isParallelSeekEnabled) {
+      for (KeyValueScanner scanner : scanners) {
+        scanner.seek(matcher.getStartKey());
+      }
+    } else {
+      parallelSeek(scanners, matcher.getStartKey());
     }

     // Combine all seeked scanners with a heap
@@ -193,8 +228,12 @@
         Long.MAX_VALUE, earliestPutTs, oldestUnexpiredTS);

     // Seek all scanners to the initial key
-    for (KeyValueScanner scanner : scanners) {
-      scanner.seek(matcher.getStartKey());
+    if (!isParallelSeekEnabled) {
+      for (KeyValueScanner scanner : scanners) {
+        scanner.seek(matcher.getStartKey());
+      }
+    } else {
+      parallelSeek(scanners, matcher.getStartKey());
     }
     heap = new KeyValueHeap(scanners, scanInfo.getComparator());
   }
@@ -512,8 +551,12 @@
      * could have done it now by storing the scan object from the constructor
      */
     List<KeyValueScanner> scanners = getScannersNoCompaction();

-    for(KeyValueScanner scanner : scanners) {
-      scanner.seek(lastTopKey);
+    if (!isParallelSeekEnabled) {
+      for (KeyValueScanner scanner : scanners) {
+        scanner.seek(lastTopKey);
+      }
+    } else {
+      parallelSeek(scanners, lastTopKey);
     }

     // Combine all seeked scanners with a heap
@@ -555,6 +598,58 @@
   }

   /**
+   * Seek store files in parallel to minimize IO latency
+   * @param scanners the list of {@link KeyValueScanner}s to seek
+   * @param kv the KeyValue on which the operation is being requested
+   * @throws IOException
+   */
+  private void parallelSeek(final List<? extends KeyValueScanner>
+      scanners, final KeyValue kv) throws IOException {
+    if (scanners.isEmpty()) return;
+    int storeFileScannerCount = scanners.size();
+    List<Future<Void>> futures = new ArrayList<Future<Void>>(storeFileScannerCount);
+    for (KeyValueScanner scanner : scanners) {
+      if (scanner instanceof StoreFileScanner) {
+        Callable<Void> task = new ScannerSeekWorker(scanner, kv,
+            MultiVersionConsistencyControl.getThreadReadPoint());
+        futures.add(seekExecutor.submit(task));
+      } else {
+        scanner.seek(kv);
+      }
+    }
+    try {
+      for (Future<Void> future : futures) {
+        future.get();
+      }
+    } catch (InterruptedException ie) {
+      throw new InterruptedIOException(ie.getMessage());
+    } catch (ExecutionException e) {
+      throw new IOException(e.getMessage());
+    } catch (CancellationException ce) {
+      throw new IOException(ce.getMessage());
+    }
+  }
+
+  private static class ScannerSeekWorker implements Callable<Void> {
+    private KeyValueScanner scanner;
+    private KeyValue keyValue;
+    private long readPoint;
+
+    public ScannerSeekWorker(KeyValueScanner scanner, KeyValue keyValue,
+        long readPoint) {
+      this.scanner = scanner;
+      this.keyValue = keyValue;
+      this.readPoint = readPoint;
+    }
+
+    public Void call() throws IOException {
+      MultiVersionConsistencyControl.setThreadReadPoint(readPoint);
+      scanner.seek(keyValue);
+      return null;
+    }
+  }
+
+  /**
    * Used in testing.
    * @return all scanners in no particular order
    */
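Note on parallelSeek() above: each StoreFileScanner's initial seek is handed to the region server's shared executor and the caller blocks on the returned Futures, so seeking N store files costs roughly the slowest single seek instead of the sum of all of them; scanners that are not StoreFileScanners (the memstore scanner, for instance) are still sought inline. Because the seek now happens on a pool thread, ScannerSeekWorker first re-installs the caller's thread-local MVCC read point via MultiVersionConsistencyControl.setThreadReadPoint(readPoint). Stripped of the HBase types, the pattern reduces to the following sketch (Seekable and the surrounding class are illustrative stand-ins, not patch code):

    import java.io.IOException;
    import java.io.InterruptedIOException;
    import java.util.ArrayList;
    import java.util.List;
    import java.util.concurrent.Callable;
    import java.util.concurrent.ExecutionException;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Future;

    public class ParallelSeekSketch {
      /** Illustrative stand-in for KeyValueScanner/StoreFileScanner. */
      interface Seekable {
        void seek(long key) throws IOException;
      }

      /** Submit one seek task per scanner, then wait for all of them. */
      static void parallelSeek(ExecutorService executor,
          List<? extends Seekable> scanners, final long key) throws IOException {
        List<Future<Void>> futures = new ArrayList<Future<Void>>(scanners.size());
        for (final Seekable scanner : scanners) {
          futures.add(executor.submit(new Callable<Void>() {
            public Void call() throws IOException {
              scanner.seek(key); // runs concurrently with the other seeks
              return null;
            }
          }));
        }
        try {
          for (Future<Void> future : futures) {
            future.get(); // rethrows anything a task threw
          }
        } catch (InterruptedException ie) {
          throw new InterruptedIOException(ie.getMessage());
        } catch (ExecutionException ee) {
          throw new IOException(ee.getMessage());
        }
      }
    }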
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java	(revision 1441701)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java	(working copy)
@@ -29,14 +29,12 @@
 import java.net.BindException;
 import java.net.InetSocketAddress;
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.Comparator;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
-import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
@@ -48,12 +46,13 @@
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
 import java.util.concurrent.ConcurrentSkipListMap;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.locks.ReentrantReadWriteLock;

 import javax.management.ObjectName;

-import com.google.protobuf.Message;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -66,9 +65,9 @@
 import org.apache.hadoop.hbase.FailedSanityCheckException;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HealthCheckChore;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.HealthCheckChore;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.NotServingRegionException;
 import org.apache.hadoop.hbase.OutOfOrderScannerNextException;
@@ -213,7 +212,9 @@
 import org.cliffc.high_scale_lib.Counter;

 import com.google.common.base.Function;
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
 import com.google.protobuf.ByteString;
+import com.google.protobuf.Message;
 import com.google.protobuf.RpcController;
 import com.google.protobuf.ServiceException;

@@ -310,6 +311,8 @@
   protected final AtomicBoolean haveRootRegion = new AtomicBoolean(false);
   private boolean useHBaseChecksum; // verify hbase checksums?
+  /** Used for StoreFileScanner parallel-seeking */
+  private ThreadPoolExecutor parallelSFSeekExecutor;
   private Path rootDir;

   protected final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
@@ -472,7 +475,12 @@
     this.scannerLeaseTimeoutPeriod =
       conf.getInt(HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD,
         HConstants.DEFAULT_HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD);
-
+    if (conf.getBoolean(StoreScanner.STORESCANNER_PARALLEL_SEEK_ENABLE, false)) {
+      parallelSFSeekExecutor = (ThreadPoolExecutor) Executors.newCachedThreadPool(
+          new ThreadFactoryBuilder().setDaemon(true).setNameFormat(
+              "StoreScannerSeek-%d").build());
+    }
+
     // Server to handle client requests.
     String hostname = Strings.domainNamePointerToHostName(DNS.getDefaultHost(
       conf.get("hbase.regionserver.dns.interface", "default"),
@@ -976,6 +984,9 @@
     }
     this.rpcClientEngine.close();
     this.leases.close();
+    if (parallelSFSeekExecutor != null) {
+      parallelSFSeekExecutor.shutdownNow();
+    }

     if (!killed) {
       join();
@@ -2161,6 +2172,13 @@
     return service;
   }

+  /**
+   * @return StoreFileScanner parallel-seeking executor
+   */
+  protected ThreadPoolExecutor getParallelSFSeekExecutor() {
+    return parallelSFSeekExecutor;
+  }
+
   //
   // Main program and support routines
   //
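Note on the executor above: it is a cached thread pool built with Guava's ThreadFactoryBuilder. Daemon threads cannot block JVM shutdown (the explicit shutdownNow() in the stop path is still good hygiene), and the StoreScannerSeek-%d name format makes the pool easy to pick out in thread dumps. A standalone sketch of the same construction, runnable with Guava on the classpath:

    import java.util.concurrent.Executors;
    import java.util.concurrent.ThreadPoolExecutor;

    import com.google.common.util.concurrent.ThreadFactoryBuilder;

    public class SeekExecutorSketch {
      public static void main(String[] args) {
        // Cached pool: threads are created on demand and reclaimed when idle.
        ThreadPoolExecutor executor = (ThreadPoolExecutor) Executors.newCachedThreadPool(
            new ThreadFactoryBuilder()
                .setDaemon(true)                      // never blocks JVM shutdown
                .setNameFormat("StoreScannerSeek-%d") // numbered thread names
                .build());
        executor.execute(new Runnable() {
          public void run() {
            // Prints e.g. "StoreScannerSeek-0"
            System.out.println(Thread.currentThread().getName());
          }
        });
        executor.shutdown();
      }
    }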
Index: hbase-server/src/main/resources/hbase-default.xml
===================================================================
--- hbase-server/src/main/resources/hbase-default.xml	(revision 1441701)
+++ hbase-server/src/main/resources/hbase-default.xml	(working copy)
@@ -442,6 +442,14 @@
     </description>
   </property>
+  <property>
+    <name>hbase.storescanner.parallel.seek.enable</name>
+    <value>false</value>
+    <description>
+      Enables StoreFileScanner parallel-seeking in StoreScanner,
+      a feature which can reduce response latency when a scan must
+      seek more than one store file.
+    </description>
+  </property>
   <property>
     <name>hbase.mapreduce.hfileoutputformat.blocksize</name>
     <value>65536</value>
     <description>The mapreduce HFileOutputFormat writes storefiles/hfiles.
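The feature therefore ships disabled by default. Since HRegionServer reads the flag once in its constructor, enabling it takes effect only after a region server restart, typically by overriding the property in hbase-site.xml. A sketch of setting the same key programmatically through the standard configuration API (illustrative, not code from the patch):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class EnableParallelSeek {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Same key as StoreScanner.STORESCANNER_PARALLEL_SEEK_ENABLE.
        conf.setBoolean("hbase.storescanner.parallel.seek.enable", true);
        System.out.println(conf.getBoolean(
            "hbase.storescanner.parallel.seek.enable", false)); // true
      }
    }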
Index: hbase-examples/src/test/java/org/apache/hadoop/hbase/coprocessor/example/TestBulkDeleteProtocol.java
===================================================================
--- hbase-examples/src/test/java/org/apache/hadoop/hbase/coprocessor/example/TestBulkDeleteProtocol.java	(revision 1441701)
+++ hbase-examples/src/test/java/org/apache/hadoop/hbase/coprocessor/example/TestBulkDeleteProtocol.java	(working copy)
@@ -95,6 +95,7 @@
       rows++;
     }
     assertEquals(0, rows);
+    ht.close();
   }

   @Test
@@ -118,6 +119,7 @@
       rows++;
     }
     assertEquals(0, rows);
+    ht.close();
   }

   private long invokeBulkDeleteProtocol(byte[] tableName, final Scan scan, final int rowBatchSize,
@@ -147,6 +149,7 @@
     for (BulkDeleteResponse response : result.values()) {
       noOfDeletedRows += response.getRowsDeleted();
     }
+    ht.close();
     return noOfDeletedRows;
   }

@@ -177,6 +180,7 @@
       rows++;
     }
     assertEquals(90, rows);
+    ht.close();
   }

   @Test
@@ -205,6 +209,7 @@
       rows++;
     }
     assertEquals(100, rows);
+    ht.close();
   }

   @Test
@@ -235,6 +240,7 @@
       rows++;
     }
     assertEquals(100, rows);
+    ht.close();
   }

   @Test
@@ -282,6 +288,7 @@
       rows++;
     }
     assertEquals(100, rows);
+    ht.close();
   }

   @Test
@@ -328,6 +335,7 @@
       rows++;
     }
     assertEquals(100, rows);
+    ht.close();
   }

   @Test
@@ -412,6 +420,7 @@
       rows++;
     }
     assertEquals(100, rows);
+    ht.close();
   }

   private HTable createTable(byte[] tableName) throws IOException {