diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractClientScanner.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractClientScanner.java
index 54c97d7..dc325a3 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractClientScanner.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractClientScanner.java
@@ -23,14 +23,12 @@ import java.util.Iterator;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
-import org.apache.hadoop.hbase.util.Bytes;
/**
* Helper class for custom client scanners.
*/
@InterfaceAudience.Private
public abstract class AbstractClientScanner implements ResultScanner {
-
protected ScanMetrics scanMetrics;
/**
@@ -38,14 +36,19 @@ public abstract class AbstractClientScanner implements ResultScanner {
*/
protected void initScanMetrics(Scan scan) {
// check if application wants to collect scan metrics
- byte[] enableMetrics = scan.getAttribute(
- Scan.SCAN_ATTRIBUTES_METRICS_ENABLE);
- if (enableMetrics != null && Bytes.toBoolean(enableMetrics)) {
+ if (scan.isScanMetricsEnabled()) {
scanMetrics = new ScanMetrics();
}
}
- // TODO: should this be at ResultScanner? ScanMetrics is not public API it seems.
+ /**
+ * Used internally to accumulate metrics on a scan. To enable collection of metrics on a
+ * Scanner, call {@link Scan#setScanMetricsEnabled(boolean)}. These metrics are cleared at key
+ * transition points and are accumulated in the {@link Scan} object itself.
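+ * <p>A minimal sketch of reading the running counters (checks for null since metrics may be
+ * disabled; {@code countOfRegions} is one of the {@link ScanMetrics} counters):
+ * <pre>
+ * ScanMetrics metrics = scanner.getScanMetrics();
+ * if (metrics != null) {
+ *   long regions = metrics.countOfRegions.get();
+ * }
+ * </pre>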
+ * @see Scan#getScanMetrics()
+ * @return The running {@link ScanMetrics} instance, or null if scan metrics are not enabled.
+ */
public ScanMetrics getScanMetrics() {
return scanMetrics;
}
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java
index d31642a..110b039 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java
@@ -317,9 +317,9 @@ public class ClientScanner extends AbstractClientScanner {
* machine; for scan/map reduce scenarios, we will have multiple scans running at the same time.
*
* By default, scan metrics are disabled; if the application wants to collect them, this
- * behavior can be turned on by calling calling:
- *
- * scan.setAttribute(SCAN_ATTRIBUTES_METRICS_ENABLE, Bytes.toBytes(Boolean.TRUE))
+ * behavior can be turned on by calling {@link Scan#setScanMetricsEnabled(boolean)}.
+ *
+ * <p>This invocation clears the scan metrics. Metrics are aggregated in the {@link Scan}
+ * instance.
*/
protected void writeScanMetrics() {
if (this.scanMetrics == null || scanMetricsPublished) {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
index d2dd770..3a11a84 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
@@ -34,9 +34,11 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.filter.IncompatibleFilterException;
import org.apache.hadoop.hbase.io.TimeRange;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.security.access.Permission;
import org.apache.hadoop.hbase.security.visibility.Authorizations;
import org.apache.hadoop.hbase.util.Bytes;
@@ -117,9 +119,18 @@ public class Scan extends Query {
private int storeOffset = 0;
private boolean getScan;
- // If application wants to collect scan metrics, it needs to
- // call scan.setAttribute(SCAN_ATTRIBUTES_ENABLE, Bytes.toBytes(Boolean.TRUE))
+ /**
+ * @deprecated since 1.0.0. Use {@link #setScanMetricsEnabled(boolean)}
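+ * <p>Migration sketch; the attribute-based call is the pattern this replaces:
+ * <pre>
+ * // Before (deprecated):
+ * scan.setAttribute(Scan.SCAN_ATTRIBUTES_METRICS_ENABLE, Bytes.toBytes(Boolean.TRUE));
+ * // After:
+ * scan.setScanMetricsEnabled(true);
+ * </pre>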
+ */
+ // Make private or remove.
+ @Deprecated
static public final String SCAN_ATTRIBUTES_METRICS_ENABLE = "scan.attributes.metrics.enable";
+
+ /**
+ * @deprecated since 1.0.0. Use {@link #getScanMetrics()}
+ */
+ // Make this private or remove.
+ @Deprecated
static public final String SCAN_ATTRIBUTES_METRICS_DATA = "scan.attributes.metrics.data";
// If an application wants to use multiple scans over different tables each scan must
@@ -916,4 +927,29 @@ public class Scan extends Query {
scan.setCaching(1);
return scan;
}
+
+ /**
+ * Enable collection of {@link ScanMetrics}. For advanced users.
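+ * <p>A minimal usage sketch (assumes a {@code Table} instance named {@code table}):
+ * <pre>
+ * Scan scan = new Scan();
+ * scan.setScanMetricsEnabled(true);
+ * try (ResultScanner scanner = table.getScanner(scan)) {
+ *   for (Result r : scanner) {
+ *     // consume results; metrics accumulate as the scan progresses
+ *   }
+ * }
+ * // Metrics are published to the Scan once the scanner is closed.
+ * ScanMetrics metrics = scan.getScanMetrics();
+ * </pre>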
+ * @param enabled Set to true to enable accumulating scan metrics
+ */
+ public Scan setScanMetricsEnabled(final boolean enabled) {
+ setAttribute(Scan.SCAN_ATTRIBUTES_METRICS_ENABLE, Bytes.toBytes(Boolean.valueOf(enabled)));
+ return this;
+ }
+
+ /**
+ * @return True if collection of scan metrics is enabled. For advanced users.
+ */
+ public boolean isScanMetricsEnabled() {
+ byte[] attr = getAttribute(Scan.SCAN_ATTRIBUTES_METRICS_ENABLE);
+ return attr == null ? false : Bytes.toBoolean(attr);
+ }
+
+ /**
+ * @return Metrics on this Scan, if metrics were enabled; otherwise null.
+ * @see #setScanMetricsEnabled(boolean)
+ */
+ public ScanMetrics getScanMetrics() {
+ byte[] bytes = getAttribute(Scan.SCAN_ATTRIBUTES_METRICS_DATA);
+ // No metrics were serialized onto this Scan (metrics not enabled or scanner not yet closed).
+ if (bytes == null) return null;
+ return ProtobufUtil.toScanMetrics(bytes);
+ }
}
\ No newline at end of file
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/metrics/ScanMetrics.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/metrics/ScanMetrics.java
index 86bc120..dbaec12 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/metrics/ScanMetrics.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/metrics/ScanMetrics.java
@@ -22,15 +22,13 @@ import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.atomic.AtomicLong;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import com.google.common.collect.ImmutableMap;
/**
- * Provides client-side metrics related to scan operations
+ * Provides client-side metrics related to scan operations.
* The data can be passed to mapreduce framework or other systems.
* We use atomic longs so that one thread can increment,
* while another atomically resets to zero after the values are reported
@@ -43,9 +41,6 @@ import com.google.common.collect.ImmutableMap;
@InterfaceAudience.Private
public class ScanMetrics {
-
- private static final Log LOG = LogFactory.getLog(ScanMetrics.class);
-
/**
* Hash to hold the String -> Atomic Long mappings.
*/
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java
index 8566a88..5074848 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java
@@ -47,7 +47,7 @@ import org.apache.hadoop.io.RawComparator;
import com.google.common.annotations.VisibleForTesting;
/**
- * An HBase Key/Value. This is the fundamental HBase Type.
+ * An HBase Key/Value. This is the fundamental HBase Type.
*
* HBase applications and users should use the Cell interface and avoid directly using KeyValue
* and member functions not defined in Cell.
@@ -577,7 +577,7 @@ public class KeyValue implements Cell, HeapSize, Cloneable, SettableSequenceId,
this(row, roffset, rlength, family, foffset, flength, qualifier, qoffset,
qlength, timestamp, type, value, voffset, vlength, null);
}
-
+
/**
* Constructs KeyValue structure filled with specified values. Uses the provided buffer as the
* data buffer.
@@ -742,9 +742,9 @@ public class KeyValue implements Cell, HeapSize, Cloneable, SettableSequenceId,
public KeyValue(Cell c) {
this(c.getRowArray(), c.getRowOffset(), (int)c.getRowLength(),
- c.getFamilyArray(), c.getFamilyOffset(), (int)c.getFamilyLength(),
- c.getQualifierArray(), c.getQualifierOffset(), (int) c.getQualifierLength(),
- c.getTimestamp(), Type.codeToType(c.getTypeByte()), c.getValueArray(), c.getValueOffset(),
+ c.getFamilyArray(), c.getFamilyOffset(), (int)c.getFamilyLength(),
+ c.getQualifierArray(), c.getQualifierOffset(), (int) c.getQualifierLength(),
+ c.getTimestamp(), Type.codeToType(c.getTypeByte()), c.getValueArray(), c.getValueOffset(),
c.getValueLength(), c.getTagsArray(), c.getTagsOffset(), c.getTagsLength());
this.seqId = c.getSequenceId();
}
@@ -955,7 +955,7 @@ public class KeyValue implements Cell, HeapSize, Cloneable, SettableSequenceId,
final int rlength, final byte [] family, final int foffset, int flength,
final byte [] qualifier, final int qoffset, int qlength,
final long timestamp, final Type type,
- final byte [] value, final int voffset,
+ final byte [] value, final int voffset,
int vlength, byte[] tags, int tagsOffset, int tagsLength) {
checkParameters(row, rlength, family, flength, qlength, vlength);
@@ -1125,10 +1125,10 @@ public class KeyValue implements Cell, HeapSize, Cloneable, SettableSequenceId,
/**
* @param k Key portion of a KeyValue.
- * @return Key as a String, empty string if k is null.
+ * @return Key as a String, empty string if k is null.
*/
public static String keyToString(final byte [] k) {
- if (k == null) {
+ if (k == null) {
return "";
}
return keyToString(k, 0, k.length);
@@ -1846,7 +1846,7 @@ public class KeyValue implements Cell, HeapSize, Cloneable, SettableSequenceId,
return compareFlatKey(l,loff,llen, r,roff,rlen);
}
-
+
/**
* Compares the only the user specified portion of a Key. This is overridden by MetaComparator.
* @param left
@@ -2214,7 +2214,7 @@ public class KeyValue implements Cell, HeapSize, Cloneable, SettableSequenceId,
* @param leftKey
* @param rightKey
* @return 0 if equal, <0 if left smaller, >0 if right smaller
- * @deprecated Since 0.99.2; Use {@link CellComparator#getMidpoint(Cell, Cell)} instead.
+ * @deprecated Since 0.99.2; Use CellComparator#getMidpoint(Comparator, Cell, Cell) instead.
*/
@Deprecated
public byte[] getShortMidpointKey(final byte[] leftKey, final byte[] rightKey) {
@@ -2354,7 +2354,7 @@ public class KeyValue implements Cell, HeapSize, Cloneable, SettableSequenceId,
in.readFully(bytes);
return new KeyValue(bytes, 0, length);
}
-
+
/**
* Create a new KeyValue by copying existing cell and adding new tags
* @param c
@@ -2370,9 +2370,9 @@ public class KeyValue implements Cell, HeapSize, Cloneable, SettableSequenceId,
existingTags = newTags;
}
return new KeyValue(c.getRowArray(), c.getRowOffset(), (int)c.getRowLength(),
- c.getFamilyArray(), c.getFamilyOffset(), (int)c.getFamilyLength(),
- c.getQualifierArray(), c.getQualifierOffset(), (int) c.getQualifierLength(),
- c.getTimestamp(), Type.codeToType(c.getTypeByte()), c.getValueArray(), c.getValueOffset(),
+ c.getFamilyArray(), c.getFamilyOffset(), (int)c.getFamilyLength(),
+ c.getQualifierArray(), c.getQualifierOffset(), (int) c.getQualifierLength(),
+ c.getTimestamp(), Type.codeToType(c.getTypeByte()), c.getValueArray(), c.getValueOffset(),
c.getValueLength(), existingTags);
}
@@ -2485,7 +2485,7 @@ public class KeyValue implements Cell, HeapSize, Cloneable, SettableSequenceId,
/**
* Avoids redundant comparisons for better performance.
- *
+ *
* TODO get rid of this wart
*/
public interface SamePrefixComparator {
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java
index eb5f506..0300ea2 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java
@@ -250,10 +250,12 @@ public class RemoteHTable implements Table {
return TableName.valueOf(name);
}
+ @Override
public Configuration getConfiguration() {
return conf;
}
+ @Override
public HTableDescriptor getTableDescriptor() throws IOException {
StringBuilder sb = new StringBuilder();
sb.append('/');
@@ -282,10 +284,12 @@ public class RemoteHTable implements Table {
throw new IOException("schema request timed out");
}
+ @Override
public void close() throws IOException {
client.shutdown();
}
+ @Override
public Result get(Get get) throws IOException {
TimeRange range = get.getTimeRange();
String spec = buildRowSpec(get.getRow(), get.getFamilyMap(),
@@ -304,6 +308,7 @@ public class RemoteHTable implements Table {
}
}
+ @Override
public Result[] get(List gets) throws IOException {
byte[][] rows = new byte[gets.size()][];
int maxVersions = 1;
@@ -360,6 +365,7 @@ public class RemoteHTable implements Table {
throw new IOException("get request timed out");
}
+ @Override
public boolean exists(Get get) throws IOException {
LOG.warn("exists() is really get(), just use get()");
Result result = get(get);
@@ -370,6 +376,7 @@ public class RemoteHTable implements Table {
* exists(List) is really a list of get() calls. Just use get().
* @param gets list of Get to test for the existence
*/
+ @Override
public boolean[] existsAll(List gets) throws IOException {
LOG.warn("exists(List) is really list of get() calls, just use get()");
boolean[] results = new boolean[gets.size()];
@@ -389,6 +396,7 @@ public class RemoteHTable implements Table {
return objectResults;
}
+ @Override
public void put(Put put) throws IOException {
CellSetModel model = buildModelFromPut(put);
StringBuilder sb = new StringBuilder();
@@ -417,6 +425,7 @@ public class RemoteHTable implements Table {
throw new IOException("put request timed out");
}
+ @Override
public void put(List puts) throws IOException {
// this is a trick: The gateway accepts multiple rows in a cell set and
// ignores the row specification in the URI
@@ -472,6 +481,7 @@ public class RemoteHTable implements Table {
throw new IOException("multiput request timed out");
}
+ @Override
public void delete(Delete delete) throws IOException {
String spec = buildRowSpec(delete.getRow(), delete.getFamilyCellMap(),
delete.getTimeStamp(), delete.getTimeStamp(), 1);
@@ -495,6 +505,7 @@ public class RemoteHTable implements Table {
throw new IOException("delete request timed out");
}
+ @Override
public void delete(List deletes) throws IOException {
for (Delete delete: deletes) {
delete(delete);
@@ -632,19 +643,21 @@ public class RemoteHTable implements Table {
LOG.warn(StringUtils.stringifyException(e));
}
}
-
}
+ @Override
public ResultScanner getScanner(Scan scan) throws IOException {
return new Scanner(scan);
}
+ @Override
public ResultScanner getScanner(byte[] family) throws IOException {
Scan scan = new Scan();
scan.addFamily(family);
return new Scanner(scan);
}
+ @Override
public ResultScanner getScanner(byte[] family, byte[] qualifier)
throws IOException {
Scan scan = new Scan();
@@ -660,6 +673,7 @@ public class RemoteHTable implements Table {
throw new IOException("getRowOrBefore not supported");
}
+ @Override
public boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier,
byte[] value, Put put) throws IOException {
// column to check-the-value
@@ -696,11 +710,13 @@ public class RemoteHTable implements Table {
throw new IOException("checkAndPut request timed out");
}
+ @Override
public boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier,
CompareOp compareOp, byte[] value, Put put) throws IOException {
throw new IOException("checkAndPut for non-equal comparison not implemented");
}
+ @Override
public boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier,
byte[] value, Delete delete) throws IOException {
Put put = new Put(row);
@@ -737,24 +753,29 @@ public class RemoteHTable implements Table {
throw new IOException("checkAndDelete request timed out");
}
+ @Override
public boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier,
CompareOp compareOp, byte[] value, Delete delete) throws IOException {
throw new IOException("checkAndDelete for non-equal comparison not implemented");
}
+ @Override
public Result increment(Increment increment) throws IOException {
throw new IOException("Increment not supported");
}
+ @Override
public Result append(Append append) throws IOException {
throw new IOException("Append not supported");
}
+ @Override
public long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier,
long amount) throws IOException {
throw new IOException("incrementColumnValue not supported");
}
+ @Override
public long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier,
long amount, Durability durability) throws IOException {
throw new IOException("incrementColumnValue not supported");
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReaderImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReaderImpl.java
index 47f6869..06fa712 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReaderImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReaderImpl.java
@@ -33,7 +33,6 @@ import org.apache.hadoop.hbase.client.ScannerCallable;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
-import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.mapreduce.Counter;
import org.apache.hadoop.mapreduce.InputSplit;
@@ -80,8 +79,7 @@ public class TableRecordReaderImpl {
public void restart(byte[] firstRow) throws IOException {
currentScan = new Scan(scan);
currentScan.setStartRow(firstRow);
- currentScan.setAttribute(Scan.SCAN_ATTRIBUTES_METRICS_ENABLE,
- Bytes.toBytes(Boolean.TRUE));
+ currentScan.setScanMetricsEnabled(true);
if (this.scanner != null) {
if (logScannerActivity) {
LOG.info("Closing the previously opened scanner object.");
@@ -265,14 +263,11 @@ public class TableRecordReaderImpl {
* @throws IOException
*/
private void updateCounters() throws IOException {
- byte[] serializedMetrics = currentScan.getAttribute(
- Scan.SCAN_ATTRIBUTES_METRICS_DATA);
- if (serializedMetrics == null || serializedMetrics.length == 0 ) {
+ ScanMetrics scanMetrics = this.currentScan.getScanMetrics();
+ if (scanMetrics == null) {
return;
}
- ScanMetrics scanMetrics = ProtobufUtil.toScanMetrics(serializedMetrics);
-
updateCounters(scanMetrics, numRestarts, getCounter, context, numStale);
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
index 3653cfb..b851897 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
@@ -2056,7 +2056,6 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
if (!isLoadingCfsOnDemandSet) {
scan.setLoadColumnFamiliesOnDemand(region.isLoadingCfsOnDemandDefault());
}
- scan.getAttribute(Scan.SCAN_ATTRIBUTES_METRICS_ENABLE);
region.prepareScanner(scan);
if (region.getCoprocessorHost() != null) {
scanner = region.getCoprocessorHost().preScannerOpen(scan);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ScanPerformanceEvaluation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ScanPerformanceEvaluation.java
index 0c331b7..24e9590 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/ScanPerformanceEvaluation.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ScanPerformanceEvaluation.java
@@ -29,7 +29,6 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
-import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
@@ -39,9 +38,7 @@ import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.hbase.mapreduce.TableMapper;
-import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.util.AbstractHBaseTool;
-import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.Counters;
@@ -137,7 +134,7 @@ public class ScanPerformanceEvaluation extends AbstractHBaseTool {
Scan scan = new Scan(); // default scan settings
scan.setCacheBlocks(false);
scan.setMaxVersions(1);
- scan.setAttribute(Scan.SCAN_ATTRIBUTES_METRICS_ENABLE, Bytes.toBytes(Boolean.TRUE));
+ scan.setScanMetricsEnabled(true);
if (caching != null) {
scan.setCaching(Integer.parseInt(caching));
}
@@ -177,7 +174,7 @@ public class ScanPerformanceEvaluation extends AbstractHBaseTool {
table.close();
connection.close();
- ScanMetrics metrics = ProtobufUtil.toScanMetrics(scan.getAttribute(Scan.SCAN_ATTRIBUTES_METRICS_DATA));
+ ScanMetrics metrics = scan.getScanMetrics();
long totalBytes = metrics.countOfBytesInResults.get();
double throughput = (double)totalBytes / scanTimer.elapsedTime(TimeUnit.SECONDS);
double throughputRows = (double)numRows / scanTimer.elapsedTime(TimeUnit.SECONDS);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
index c77ab29..60f0a10 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
@@ -5003,28 +5003,30 @@ public class TestFromClientSide {
Scan scan1 = new Scan();
int numRecords = 0;
- for(Result result : ht.getScanner(scan1)) {
+ ResultScanner scanner = ht.getScanner(scan1);
+ for(Result result : scanner) {
numRecords++;
}
+ scanner.close();
LOG.info("test data has " + numRecords + " records.");
// by default, scan metrics collection is turned off
- assertEquals(null, scan1.getAttribute(Scan.SCAN_ATTRIBUTES_METRICS_DATA));
+ assertEquals(null, scan1.getScanMetrics());
// turn on scan metrics
Scan scan = new Scan();
- scan.setAttribute(Scan.SCAN_ATTRIBUTES_METRICS_ENABLE, Bytes.toBytes(Boolean.TRUE));
+ scan.setScanMetricsEnabled(true);
scan.setCaching(numRecords+1);
- ResultScanner scanner = ht.getScanner(scan);
+ scanner = ht.getScanner(scan);
for (Result result : scanner.next(numRecords - 1)) {
}
scanner.close();
// closing the scanner will set the metrics.
- assertNotNull(scan.getAttribute(Scan.SCAN_ATTRIBUTES_METRICS_DATA));
+ assertNotNull(scan.getScanMetrics());
// set caching to 1, becasue metrics are collected in each roundtrip only
scan = new Scan();
- scan.setAttribute(Scan.SCAN_ATTRIBUTES_METRICS_ENABLE, Bytes.toBytes(Boolean.TRUE));
+ scan.setScanMetricsEnabled(true);
scan.setCaching(1);
scanner = ht.getScanner(scan);
// per HBASE-5717, this should still collect even if you don't run all the way to
@@ -5041,7 +5043,7 @@ public class TestFromClientSide {
// run past the end of all the records
Scan scanWithoutClose = new Scan();
scanWithoutClose.setCaching(1);
- scanWithoutClose.setAttribute(Scan.SCAN_ATTRIBUTES_METRICS_ENABLE, Bytes.toBytes(Boolean.TRUE));
+ scanWithoutClose.setScanMetricsEnabled(true);
ResultScanner scannerWithoutClose = ht.getScanner(scanWithoutClose);
for (Result result : scannerWithoutClose.next(numRecords + 1)) {
}
@@ -5054,7 +5056,7 @@ public class TestFromClientSide {
Scan scanWithClose = new Scan();
// make sure we can set caching up to the number of a scanned values
scanWithClose.setCaching(numRecords);
- scanWithClose.setAttribute(Scan.SCAN_ATTRIBUTES_METRICS_ENABLE, Bytes.toBytes(Boolean.TRUE));
+ scanWithClose.setScanMetricsEnabled(true);
ResultScanner scannerWithClose = ht.getScanner(scanWithClose);
for (Result result : scannerWithClose.next(numRecords + 1)) {
}
@@ -5068,7 +5070,6 @@ public class TestFromClientSide {
byte[] serializedMetrics = scan.getAttribute(Scan.SCAN_ATTRIBUTES_METRICS_DATA);
assertTrue("Serialized metrics were not found.", serializedMetrics != null);
-
ScanMetrics scanMetrics = ProtobufUtil.toScanMetrics(serializedMetrics);
return scanMetrics;
@@ -5209,10 +5210,10 @@ public class TestFromClientSide {
// Verify region location before move.
HRegionLocation addrCache = table.getRegionLocation(regionInfo.getStartKey(), false);
HRegionLocation addrNoCache = table.getRegionLocation(regionInfo.getStartKey(), true);
-
+
assertEquals(addrBefore.getPort(), addrCache.getPort());
assertEquals(addrBefore.getPort(), addrNoCache.getPort());
-
+
ServerName addrAfter = null;
// Now move the region to a different server.
for (int i = 0; i < SLAVES; i++) {
@@ -5227,7 +5228,7 @@ public class TestFromClientSide {
break;
}
}
-
+
// Verify the region was moved.
addrCache = table.getRegionLocation(regionInfo.getStartKey(), false);
addrNoCache = table.getRegionLocation(regionInfo.getStartKey(), true);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannersFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannersFromClientSide.java
index a6c1cfe..ff0166b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannersFromClientSide.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannersFromClientSide.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.client;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
+import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
@@ -96,6 +97,38 @@ public class TestScannersFromClientSide {
}
/**
+ * Test ScanMetrics are available
+ * @throws IOException
+ */
+ @Test (timeout=300000)
+ public void testScanMetrics() throws IOException {
+ TableName TABLE = TableName.valueOf("testScanMetrics");
+ byte [][] regions = new byte [][] {Bytes.toBytes("a"), Bytes.toBytes("d")};
+ try (Table ht = TEST_UTIL.createTable(TABLE, FAMILY, regions)) {
+ // Add an arbitrary row to each of our regions.
+ for (int i = 0; i < regions.length; i++) {
+ Put put = new Put(regions[i]);
+ put.add(FAMILY, FAMILY, FAMILY);
+ ht.put(put);
+ }
+ Scan scan = new Scan();
+ scan.setScanMetricsEnabled(true);
+ try (ResultScanner scanner = ht.getScanner(scan)) {
+ Result result = null;
+ while ((result = scanner.next()) != null) {
+ System.out.println(result);
+ }
+ // Just make sure this basically works. Most counts will likely be zero because the metrics
+ // system reads and then clears scan metrics on a period and at junctures like scanner close
+ // and region transitions; the region count should still have been recorded.
+ assertTrue(scan.getScanMetrics() != null);
+ assertTrue(scan.getScanMetrics().countOfRegions.get() > 0);
+ }
+ }
+ }
+
+ /**
* Test from client side for batch of scan
*
* @throws Exception