Index: src/main/java/org/apache/hadoop/hbase/regionserver/InternalScan.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/regionserver/InternalScan.java (revision 0)
+++ src/main/java/org/apache/hadoop/hbase/regionserver/InternalScan.java (revision 0)
@@ -0,0 +1,78 @@
+/**
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.Scan;
+
+/**
+ * Special internal-only scanner, currently used for increment operations to
+ * allow additional server-side arguments for Scan operations.
+ *
+ * This class was created rather than adding new options/parameters to the
+ * public Scan API.
+ *
+ * Supports adding an option to only read from the MemStore with
+ * {@link #checkOnlyMemStore()} or to only read from StoreFiles with
+ * {@link #checkOnlyStoreFiles()}.
+ */
+class InternalScan extends Scan {
+ private boolean memOnly = false;
+ private boolean filesOnly = false;
+
+ /**
+ * @param get get to model scan after
+ */
+ public InternalScan(Get get) {
+ super(get);
+ }
+
+ /**
+ * StoreFiles will not be scanned. Only MemStore will be scanned.
+ */
+ public void checkOnlyMemStore() {
+ memOnly = true;
+ filesOnly = false;
+ }
+
+ /**
+ * MemStore will not be scanned. Only StoreFiles will be scanned.
+ */
+ public void checkOnlyStoreFiles() {
+ memOnly = false;
+ filesOnly = true;
+ }
+
+ /**
+ * Returns true if only the MemStore should be checked. False if not.
+ * @return true to only check MemStore
+ */
+ public boolean isCheckOnlyMemStore() {
+ return (memOnly);
+ }
+
+ /**
+ * Returns true if only StoreFiles should be checked. False if not.
+   * @return true to only check StoreFiles
+ */
+ public boolean isCheckOnlyStoreFiles() {
+ return (filesOnly);
+ }
+}
\ No newline at end of file
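
A rough usage sketch (not part of the patch): how server-side code in the
regionserver package might use the new flags to read only from the MemStore.
The helper name and local variables are hypothetical; HRegion#getScanner and
InternalScanner#next are the same internal calls the patch itself makes in
getLastIncrement below.

  // Hypothetical helper, assumed to live in org.apache.hadoop.hbase.regionserver
  // because InternalScan is package-private.
  private List<KeyValue> memStoreOnlyGet(HRegion region, byte [] row,
      byte [] family, byte [] qualifier) throws IOException {
    Get get = new Get(row);
    get.addColumn(family, qualifier);

    InternalScan iscan = new InternalScan(get);
    iscan.checkOnlyMemStore();           // StoreFiles will not be consulted

    List<KeyValue> results = new ArrayList<KeyValue>();
    InternalScanner scanner = region.getScanner(iscan);
    try {
      scanner.next(results);             // fills results from the MemStore only
    } finally {
      scanner.close();
    }
    return results;
  }
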
Index: src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java (revision 1026902)
+++ src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java (working copy)
@@ -27,6 +27,7 @@
import java.io.IOException;
import java.util.ArrayList;
+import java.util.LinkedList;
import java.util.List;
import java.util.NavigableSet;
@@ -149,22 +150,33 @@
*/
  private List<KeyValueScanner> getScanners(Scan scan,
      final NavigableSet<byte[]> columns) throws IOException {
+ boolean memOnly;
+ boolean filesOnly;
+ if (scan instanceof InternalScan) {
+ InternalScan iscan = (InternalScan)scan;
+ memOnly = iscan.isCheckOnlyMemStore();
+ filesOnly = iscan.isCheckOnlyStoreFiles();
+ } else {
+ memOnly = false;
+ filesOnly = false;
+ }
+    List<KeyValueScanner> scanners = new LinkedList<KeyValueScanner>();
// First the store file scanners
-    List<StoreFileScanner> sfScanners = StoreFileScanner
+ if (memOnly == false) {
+      List<StoreFileScanner> sfScanners = StoreFileScanner
.getScannersForStoreFiles(store.getStorefiles(), cacheBlocks, isGet);
-    List<KeyValueScanner> scanners =
-      new ArrayList<KeyValueScanner>(sfScanners.size()+1);
- // include only those scan files which pass all filters
- for (StoreFileScanner sfs : sfScanners) {
- if (sfs.shouldSeek(scan, columns)) {
- scanners.add(sfs);
+ // include only those scan files which pass all filters
+ for (StoreFileScanner sfs : sfScanners) {
+ if (sfs.shouldSeek(scan, columns)) {
+ scanners.add(sfs);
+ }
}
}
// Then the memstore scanners
- if (this.store.memstore.shouldSeek(scan)) {
- scanners.addAll(this.store.memstore.getScanners());
+ if ((filesOnly == false) && (this.store.memstore.shouldSeek(scan))) {
+ scanners.addAll(this.store.memstore.getScanners());
}
return scanners;
}
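
For clarity, here is the flag handling in getScanners() reduced to its core,
stripped of the HBase types (an illustration only, not code from the patch;
assumes java.util.List and java.util.LinkedList): each scanner source is
skipped only when the flag restricting reads to the other source is set.

  // Generic illustration of the selection logic above.
  static <T> List<T> selectSources(boolean memOnly, boolean filesOnly,
      List<T> fileScanners, List<T> memScanners) {
    List<T> selected = new LinkedList<T>();
    if (!memOnly) {
      selected.addAll(fileScanners);   // StoreFile scanners unless MemStore-only
    }
    if (!filesOnly) {
      selected.addAll(memScanners);    // MemStore scanners unless StoreFile-only
    }
    return selected;
  }

A LinkedList replaces the pre-sized ArrayList, presumably because the number of
selected scanners is no longer known up front; either source may now be skipped
entirely.
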
Index: src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java (revision 1026902)
+++ src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java (working copy)
@@ -2859,6 +2859,86 @@
return new Result(result);
}
+ /**
+ * An optimized version of {@link #get(Get)} that checks MemStore first for
+ * the specified query.
+ *
+   * This is intended for use by increment operations, where we have the
+   * guarantee that versions are never inserted out-of-order, so if a value
+   * exists in the MemStore it is the latest value.
+   *
+   * This method should only be used with a Get that has no TimeRange and a
+   * maxVersions of 1.
+   * @param get the Get to perform
+   * @return list of KeyValues found
+ * @throws IOException
+ */
+  private List<KeyValue> getLastIncrement(final Get get) throws IOException {
+ InternalScan iscan = new InternalScan(get);
+
+    List<KeyValue> results = new ArrayList<KeyValue>();
+
+ // memstore scan
+ iscan.checkOnlyMemStore();
+ InternalScanner scanner = null;
+ try {
+ scanner = getScanner(iscan);
+ scanner.next(results);
+ } finally {
+ if (scanner != null)
+ scanner.close();
+ }
+
+ // count how many columns we're looking for
+ int expected = 0;
+    Map<byte [], NavigableSet<byte []>> familyMap = get.getFamilyMap();
+    for (NavigableSet<byte []> qfs : familyMap.values()) {
+ expected += qfs.size();
+ }
+
+ // found everything we were looking for, done
+ if (results.size() == expected) {
+ return results;
+ }
+
+ // still have more columns to find
+ if (results != null && !results.isEmpty()) {
+ // subtract what was found in memstore
+ for (KeyValue kv : results) {
+ byte [] family = kv.getFamily();
+      NavigableSet<byte []> qfs = familyMap.get(family);
+ qfs.remove(kv.getQualifier());
+ if (qfs.isEmpty()) familyMap.remove(family);
+ expected--;
+ }
+ // make a new get for just what is left
+ Get newGet = new Get(get.getRow());
+      for (Map.Entry<byte [], NavigableSet<byte []>> f : familyMap.entrySet()) {
+ byte [] family = f.getKey();
+ for (byte [] qualifier : f.getValue()) {
+ newGet.addColumn(family, qualifier);
+ }
+ }
+ iscan = new InternalScan(newGet);
+ }
+
+ // check store files for what is left
+    List<KeyValue> fileResults = new ArrayList<KeyValue>();
+ iscan.checkOnlyStoreFiles();
+ scanner = null;
+ try {
+ scanner = getScanner(iscan);
+ scanner.next(fileResults);
+ } finally {
+ if (scanner != null)
+ scanner.close();
+ }
+
+ // combine and return
+ results.addAll(fileResults);
+ return results;
+ }
+
/*
* Do a get based on the get parameter.
*/
@@ -2905,7 +2985,7 @@
Get get = new Get(row);
get.addColumn(family, qualifier);
-    List<KeyValue> results = get(get);
+    List<KeyValue> results = getLastIncrement(get);
if (!results.isEmpty()) {
KeyValue kv = results.get(0);
@@ -2914,7 +2994,7 @@
result += Bytes.toLong(buffer, valueOffset, Bytes.SIZEOF_LONG);
}
- // bulid the KeyValue now:
+ // build the KeyValue now:
KeyValue newKv = new KeyValue(row, family,
qualifier, EnvironmentEdgeManager.currentTimeMillis(),
Bytes.toBytes(result));
@@ -2930,7 +3010,7 @@
// Now request the ICV to the store, this will set the timestamp
// appropriately depending on if there is a value in memcache or not.
- // returns the
+    // returns the change in the size of the memstore from this operation
long size = store.updateColumnValue(row, family, qualifier, result);
size = this.memstoreSize.addAndGet(size);
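
For context, a hedged sketch of the client-side call that exercises this
server path. HTable#incrementColumnValue is the existing public API; the
table, row, and column names below are made up, and conf is assumed to be an
existing Configuration.

  HTable table = new HTable(conf, "counters");
  long newValue = table.incrementColumnValue(
      Bytes.toBytes("row1"),       // row key
      Bytes.toBytes("cf"),         // column family
      Bytes.toBytes("hits"),       // qualifier
      1L);                         // amount to add

Since each increment is a server-side read-modify-write, serving the read from
the MemStore via getLastIncrement avoids a StoreFile seek whenever the current
value is still in the MemStore.
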
Index: pom.xml
===================================================================
--- pom.xml (revision 1026902)
+++ pom.xml (working copy)
@@ -10,7 +10,7 @@
  <groupId>org.apache.hbase</groupId>
-  <artifactId>hbase</artifactId>
+  <artifactId>hbase-clean-trunk</artifactId>
  <packaging>jar</packaging>
  <version>0.89.0-SNAPSHOT</version>
  <name>HBase</name>