diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractClientScanner.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractClientScanner.java
index 54c97d7..65fc2cd 100644
--- hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractClientScanner.java
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractClientScanner.java
@@ -23,7 +23,6 @@ import java.util.Iterator;
 
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
-import org.apache.hadoop.hbase.util.Bytes;
 
 /**
  * Helper class for custom client scanners.
@@ -38,14 +37,13 @@ public abstract class AbstractClientScanner implements ResultScanner {
    */
   protected void initScanMetrics(Scan scan) {
     // check if application wants to collect scan metrics
-    byte[] enableMetrics = scan.getAttribute(
-        Scan.SCAN_ATTRIBUTES_METRICS_ENABLE);
-    if (enableMetrics != null && Bytes.toBoolean(enableMetrics)) {
+    if (scan.isMetricsEnabled()) {
       scanMetrics = new ScanMetrics();
     }
   }
 
   // TODO: should this be at ResultScanner? ScanMetrics is not public API it seems.
+  @Override
   public ScanMetrics getScanMetrics() {
     return scanMetrics;
   }
diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/ResultScanner.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/ResultScanner.java
index 381505c..949fff4 100644
--- hbase-client/src/main/java/org/apache/hadoop/hbase/client/ResultScanner.java
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/ResultScanner.java
@@ -23,6 +23,7 @@ import java.io.IOException;
 
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
 
 /**
  * Interface for client-side scanning.
@@ -33,6 +34,22 @@ import org.apache.hadoop.hbase.classification.InterfaceStability;
 public interface ResultScanner extends Closeable, Iterable<Result> {
 
   /**
+   * Scan metrics for advanced users. Returns null unless scan metrics have been enabled for the
+   * Scan. To enable metrics, call {@link Scan#setMetricsEnabled(boolean)} after constructing your
+   * Scan but before you ask {@link Table} for the {@link ResultScanner}; e.g.:
+   *
+   * Scan scan = new Scan(...);
+   * scan.setMetricsEnabled(true);
+   * try (ResultScanner scanner = table.getScanner(scan)) {
+   *   ...
+   *   ScanMetrics metrics = scanner.getScanMetrics();
+   * }
+   *
+   * @return the running {@link ScanMetrics} instance, or null if scan metrics are not enabled
+   */
+  ScanMetrics getScanMetrics();
+
+  /**
    * Grab the next row's worth of values. The scanner will return a Result.
    * @return Result object if there is another row, null if the scanner is
    * exhausted.
diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
index d2dd770..ad71355 100644
--- hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
@@ -117,9 +117,16 @@ public class Scan extends Query {
   private int storeOffset = 0;
   private boolean getScan;
 
-  // If application wants to collect scan metrics, it needs to
-  // call scan.setAttribute(SCAN_ATTRIBUTES_ENABLE, Bytes.toBytes(Boolean.TRUE))
+  /**
+   * @deprecated since 1.0.0. Use {@link #setMetricsEnabled(boolean)}
+   */
+  @Deprecated
   static public final String SCAN_ATTRIBUTES_METRICS_ENABLE = "scan.attributes.metrics.enable";
+
+  /**
+   * @deprecated since 1.0.0. Use {@link ResultScanner#getScanMetrics()}
+   */
+  @Deprecated
   static public final String SCAN_ATTRIBUTES_METRICS_DATA = "scan.attributes.metrics.data";
 
   // If an application wants to use multiple scans over different tables each scan must
@@ -916,4 +923,23 @@ public class Scan extends Query {
     scan.setCaching(1);
     return scan;
   }
-}
\ No newline at end of file
+
+  /**
+   * Helper method for advanced users. Enable collection of {@link ScanMetrics} for this Scan.
+   * @param enabled set to true to enable accumulating scan metrics
+   * @return this
+   */
+  public Scan setMetricsEnabled(final boolean enabled) {
+    setAttribute(Scan.SCAN_ATTRIBUTES_METRICS_ENABLE, Bytes.toBytes(enabled));
+    return this;
+  }
+
+  /**
+   * Helper method for advanced users.
+   * @return true if collection of scan metrics is enabled, false otherwise
+   */
+  public boolean isMetricsEnabled() {
+    byte[] attr = getAttribute(Scan.SCAN_ATTRIBUTES_METRICS_ENABLE);
+    return attr == null ? false : Bytes.toBoolean(attr);
+  }
+}
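Taken together, the three changes above replace the old attribute-based round trip (set SCAN_ATTRIBUTES_METRICS_ENABLE, then decode SCAN_ATTRIBUTES_METRICS_DATA with ProtobufUtil) with a typed API. A minimal sketch of the intended call pattern follows; the open Connection and the table name "t1" are illustrative assumptions, not part of the patch:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.ResultScanner;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.client.metrics.ScanMetrics;

    // connection is assumed to be an open Connection; "t1" is a hypothetical table.
    try (Table table = connection.getTable(TableName.valueOf("t1"))) {
      Scan scan = new Scan();
      scan.setMetricsEnabled(true);  // was: setAttribute(SCAN_ATTRIBUTES_METRICS_ENABLE, ...)
      try (ResultScanner scanner = table.getScanner(scan)) {
        for (Result result : scanner) {
          // consume results; counters accumulate as RPC round trips complete
        }
        ScanMetrics metrics = scanner.getScanMetrics();  // was: decode SCAN_ATTRIBUTES_METRICS_DATA
        System.out.println("RPC calls: " + metrics.countOfRPCcalls.get());
      }
    }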

diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/metrics/ScanMetrics.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/metrics/ScanMetrics.java
index 86bc120..dbaec12 100644
--- hbase-client/src/main/java/org/apache/hadoop/hbase/client/metrics/ScanMetrics.java
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/metrics/ScanMetrics.java
@@ -22,15 +22,13 @@ import java.util.HashMap;
 import java.util.Map;
 import java.util.concurrent.atomic.AtomicLong;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 
 import com.google.common.collect.ImmutableMap;
 
 /**
- * Provides client-side metrics related to scan operations
+ * Provides client-side metrics related to scan operations.
  * The data can be passed to mapreduce framework or other systems.
  * We use atomic longs so that one thread can increment,
  * while another atomically resets to zero after the values are reported
@@ -43,9 +41,6 @@ import com.google.common.collect.ImmutableMap;
 @InterfaceAudience.Private
 public class ScanMetrics {
-
-  private static final Log LOG = LogFactory.getLog(ScanMetrics.class);
-
   /**
    * Hash to hold the String -> Atomic Long mappings.
    */
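The class comment retained above ("one thread can increment, while another atomically resets to zero after the values are reported") is the reason every counter is an AtomicLong rather than a plain long. A standalone sketch of that pattern, with an illustrative counter name:

    import java.util.concurrent.atomic.AtomicLong;

    AtomicLong countOfRPCcalls = new AtomicLong();

    // Scanning thread: bump the counter as work happens.
    countOfRPCcalls.incrementAndGet();

    // Reporting thread: snapshot and reset in one atomic step, so increments
    // that land between the read and the reset are never lost.
    long reported = countOfRPCcalls.getAndSet(0);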
diff --git hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java
index 8566a88..5074848 100644
--- hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java
+++ hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java
@@ -47,7 +47,7 @@ import org.apache.hadoop.io.RawComparator;
 import com.google.common.annotations.VisibleForTesting;
 
 /**
- * An HBase Key/Value. This is the fundamental HBase Type. 
+ * An HBase Key/Value. This is the fundamental HBase Type.
  * <p>
  * HBase applications and users should use the Cell interface and avoid directly using KeyValue
  * and member functions not defined in Cell.
@@ -577,7 +577,7 @@ public class KeyValue implements Cell, HeapSize, Cloneable, SettableSequenceId,
     this(row, roffset, rlength, family, foffset, flength, qualifier, qoffset, qlength,
         timestamp, type, value, voffset, vlength, null);
   }
-  
+
   /**
    * Constructs KeyValue structure filled with specified values. Uses the provided buffer as the
    * data buffer.
@@ -742,9 +742,9 @@ public class KeyValue implements Cell, HeapSize, Cloneable, SettableSequenceId,
   public KeyValue(Cell c) {
     this(c.getRowArray(), c.getRowOffset(), (int)c.getRowLength(),
-        c.getFamilyArray(), c.getFamilyOffset(), (int)c.getFamilyLength(), 
-        c.getQualifierArray(), c.getQualifierOffset(), (int) c.getQualifierLength(), 
-        c.getTimestamp(), Type.codeToType(c.getTypeByte()), c.getValueArray(), c.getValueOffset(), 
+        c.getFamilyArray(), c.getFamilyOffset(), (int)c.getFamilyLength(),
+        c.getQualifierArray(), c.getQualifierOffset(), (int) c.getQualifierLength(),
+        c.getTimestamp(), Type.codeToType(c.getTypeByte()), c.getValueArray(), c.getValueOffset(),
         c.getValueLength(), c.getTagsArray(), c.getTagsOffset(), c.getTagsLength());
     this.seqId = c.getSequenceId();
   }
@@ -955,7 +955,7 @@ public class KeyValue implements Cell, HeapSize, Cloneable, SettableSequenceId,
       final int rlength,
       final byte [] family, final int foffset, int flength,
       final byte [] qualifier, final int qoffset, int qlength,
       final long timestamp, final Type type,
-      final byte [] value, final int voffset, 
+      final byte [] value, final int voffset,
       int vlength, byte[] tags, int tagsOffset, int tagsLength) {
     checkParameters(row, rlength, family, flength, qlength, vlength);
@@ -1125,10 +1125,10 @@ public class KeyValue implements Cell, HeapSize, Cloneable, SettableSequenceId,
   /**
    * @param k Key portion of a KeyValue.
-   * @return Key as a String, empty string if k is null. 
+   * @return Key as a String, empty string if k is null.
    */
   public static String keyToString(final byte [] k) {
-    if (k == null) { 
+    if (k == null) {
       return "";
     }
     return keyToString(k, 0, k.length);
@@ -1846,7 +1846,7 @@ public class KeyValue implements Cell, HeapSize, Cloneable, SettableSequenceId,
       return compareFlatKey(l,loff,llen, r,roff,rlen);
     }
-    
+
     /**
      * Compares only the user specified portion of a Key. This is overridden by MetaComparator.
      * @param left
@@ -2214,7 +2214,7 @@ public class KeyValue implements Cell, HeapSize, Cloneable, SettableSequenceId,
    * @param leftKey
    * @param rightKey
    * @return 0 if equal, <0 if left smaller, >0 if right smaller
-   * @deprecated Since 0.99.2; Use {@link CellComparator#getMidpoint(Cell, Cell)} instead.
+   * @deprecated Since 0.99.2; Use CellComparator#getMidpoint(Comparator, Cell, Cell) instead.
    */
   @Deprecated
   public byte[] getShortMidpointKey(final byte[] leftKey, final byte[] rightKey) {
@@ -2354,7 +2354,7 @@ public class KeyValue implements Cell, HeapSize, Cloneable, SettableSequenceId,
     in.readFully(bytes);
     return new KeyValue(bytes, 0, length);
   }
-  
+
   /**
    * Create a new KeyValue by copying existing cell and adding new tags
    * @param c
@@ -2370,9 +2370,9 @@ public class KeyValue implements Cell, HeapSize, Cloneable, SettableSequenceId,
       existingTags = newTags;
     }
     return new KeyValue(c.getRowArray(), c.getRowOffset(), (int)c.getRowLength(),
-      c.getFamilyArray(), c.getFamilyOffset(), (int)c.getFamilyLength(), 
-      c.getQualifierArray(), c.getQualifierOffset(), (int) c.getQualifierLength(), 
-      c.getTimestamp(), Type.codeToType(c.getTypeByte()), c.getValueArray(), c.getValueOffset(), 
+      c.getFamilyArray(), c.getFamilyOffset(), (int)c.getFamilyLength(),
+      c.getQualifierArray(), c.getQualifierOffset(), (int) c.getQualifierLength(),
+      c.getTimestamp(), Type.codeToType(c.getTypeByte()), c.getValueArray(), c.getValueOffset(),
       c.getValueLength(), existingTags);
   }
@@ -2485,7 +2485,7 @@ public class KeyValue implements Cell, HeapSize, Cloneable, SettableSequenceId,
   /**
    * Avoids redundant comparisons for better performance.
-   * 
+   *
    * TODO get rid of this wart
    */
   public interface SamePrefixComparator {
diff --git hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java
index eb5f506..299b379 100644
--- hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java
+++ hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java
@@ -55,6 +55,7 @@ import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.client.coprocessor.Batch;
 import org.apache.hadoop.hbase.client.coprocessor.Batch.Callback;
+import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
 import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
 import org.apache.hadoop.hbase.io.TimeRange;
 import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
@@ -250,10 +251,12 @@ public class RemoteHTable implements Table {
     return TableName.valueOf(name);
   }
 
+  @Override
   public Configuration getConfiguration() {
     return conf;
   }
 
+  @Override
   public HTableDescriptor getTableDescriptor() throws IOException {
     StringBuilder sb = new StringBuilder();
     sb.append('/');
@@ -282,10 +285,12 @@ public class RemoteHTable implements Table {
     throw new IOException("schema request timed out");
   }
 
+  @Override
   public void close() throws IOException {
     client.shutdown();
   }
 
+  @Override
   public Result get(Get get) throws IOException {
     TimeRange range = get.getTimeRange();
     String spec = buildRowSpec(get.getRow(), get.getFamilyMap(),
@@ -304,6 +309,7 @@ public class RemoteHTable implements Table {
     }
   }
 
+  @Override
   public Result[] get(List<Get> gets) throws IOException {
     byte[][] rows = new byte[gets.size()][];
     int maxVersions = 1;
@@ -360,6 +366,7 @@ public class RemoteHTable implements Table {
     throw new IOException("get request timed out");
   }
 
+  @Override
   public boolean exists(Get get) throws IOException {
     LOG.warn("exists() is really get(), just use get()");
     Result result = get(get);
@@ -370,6 +377,7 @@ public class RemoteHTable implements Table {
    * exists(List) is really a list of get() calls. Just use get().
    * @param gets list of Get to test for the existence
    */
+  @Override
   public boolean[] existsAll(List<Get> gets) throws IOException {
     LOG.warn("exists(List) is really list of get() calls, just use get()");
     boolean[] results = new boolean[gets.size()];
@@ -389,6 +397,7 @@ public class RemoteHTable implements Table {
     return objectResults;
   }
 
+  @Override
   public void put(Put put) throws IOException {
     CellSetModel model = buildModelFromPut(put);
     StringBuilder sb = new StringBuilder();
@@ -417,6 +426,7 @@ public class RemoteHTable implements Table {
     throw new IOException("put request timed out");
   }
 
+  @Override
   public void put(List<Put> puts) throws IOException {
     // this is a trick: The gateway accepts multiple rows in a cell set and
     // ignores the row specification in the URI
@@ -472,6 +482,7 @@ public class RemoteHTable implements Table {
     throw new IOException("multiput request timed out");
   }
 
+  @Override
   public void delete(Delete delete) throws IOException {
     String spec = buildRowSpec(delete.getRow(), delete.getFamilyCellMap(),
       delete.getTimeStamp(), delete.getTimeStamp(), 1);
@@ -495,6 +506,7 @@ public class RemoteHTable implements Table {
     throw new IOException("delete request timed out");
   }
 
+  @Override
   public void delete(List<Delete> deletes) throws IOException {
     for (Delete delete: deletes) {
       delete(delete);
@@ -633,18 +645,27 @@ public class RemoteHTable implements Table {
       }
     }
 
+    @Override
+    public ScanMetrics getScanMetrics() {
+      // TODO: no support for scan metrics
+      return null;
+    }
+
   }
 
+  @Override
   public ResultScanner getScanner(Scan scan) throws IOException {
     return new Scanner(scan);
   }
 
+  @Override
   public ResultScanner getScanner(byte[] family) throws IOException {
     Scan scan = new Scan();
     scan.addFamily(family);
     return new Scanner(scan);
   }
 
+  @Override
   public ResultScanner getScanner(byte[] family, byte[] qualifier)
       throws IOException {
     Scan scan = new Scan();
@@ -660,6 +681,7 @@ public class RemoteHTable implements Table {
     throw new IOException("getRowOrBefore not supported");
   }
 
+  @Override
   public boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier,
       byte[] value, Put put) throws IOException {
     // column to check-the-value
@@ -696,11 +718,13 @@ public class RemoteHTable implements Table {
     throw new IOException("checkAndPut request timed out");
   }
 
+  @Override
   public boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier,
       CompareOp compareOp, byte[] value, Put put) throws IOException {
     throw new IOException("checkAndPut for non-equal comparison not implemented");
   }
 
+  @Override
   public boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier,
       byte[] value, Delete delete) throws IOException {
     Put put = new Put(row);
@@ -737,24 +761,29 @@ public class RemoteHTable implements Table {
     throw new IOException("checkAndDelete request timed out");
   }
 
+  @Override
   public boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier,
       CompareOp compareOp, byte[] value, Delete delete) throws IOException {
     throw new IOException("checkAndDelete for non-equal comparison not implemented");
   }
 
+  @Override
   public Result increment(Increment increment) throws IOException {
     throw new IOException("Increment not supported");
   }
 
+  @Override
   public Result append(Append append) throws IOException {
     throw new IOException("Append not supported");
   }
 
+  @Override
   public long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier,
       long amount) throws IOException {
     throw new IOException("incrementColumnValue not supported");
   }
 
+  @Override
   public long incrementColumnValue(byte[] row, byte[] family, byte[]
       qualifier, long amount, Durability durability) throws IOException {
     throw new IOException("incrementColumnValue not supported");
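Note that RemoteHTable's Scanner is a ResultScanner implementation with no metrics support, so its getScanMetrics() always returns null, just as it does for any scan that never enabled metrics. Callers coded against the interface should therefore treat the getter as nullable; a small defensive sketch:

    ScanMetrics metrics = scanner.getScanMetrics();
    long bytesInResults = (metrics == null) ? 0L : metrics.countOfBytesInResults.get();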
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReaderImpl.java hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReaderImpl.java
index 47f6869..a6650c1 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReaderImpl.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReaderImpl.java
@@ -33,7 +33,6 @@ import org.apache.hadoop.hbase.client.ScannerCallable;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
-import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.mapreduce.Counter;
 import org.apache.hadoop.mapreduce.InputSplit;
@@ -80,8 +79,7 @@ public class TableRecordReaderImpl {
   public void restart(byte[] firstRow) throws IOException {
     currentScan = new Scan(scan);
     currentScan.setStartRow(firstRow);
-    currentScan.setAttribute(Scan.SCAN_ATTRIBUTES_METRICS_ENABLE,
-        Bytes.toBytes(Boolean.TRUE));
+    currentScan.setMetricsEnabled(true);
     if (this.scanner != null) {
       if (logScannerActivity) {
         LOG.info("Closing the previously opened scanner object.");
@@ -265,14 +263,11 @@ public class TableRecordReaderImpl {
    * @throws IOException
    */
  private void updateCounters() throws IOException {
-    byte[] serializedMetrics = currentScan.getAttribute(
-        Scan.SCAN_ATTRIBUTES_METRICS_DATA);
-    if (serializedMetrics == null || serializedMetrics.length == 0) {
+    ScanMetrics scanMetrics = scanner.getScanMetrics();
+    if (scanMetrics == null) {
       return;
     }
 
-    ScanMetrics scanMetrics = ProtobufUtil.toScanMetrics(serializedMetrics);
-
     updateCounters(scanMetrics, numRestarts, getCounter, context, numStale);
   }
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
index 3653cfb..b851897 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
@@ -2056,7 +2056,6 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
         if (!isLoadingCfsOnDemandSet) {
           scan.setLoadColumnFamiliesOnDemand(region.isLoadingCfsOnDemandDefault());
         }
-        scan.getAttribute(Scan.SCAN_ATTRIBUTES_METRICS_ENABLE);
         region.prepareScanner(scan);
         if (region.getCoprocessorHost() != null) {
           scanner = region.getCoprocessorHost().preScannerOpen(scan);
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/ScanPerformanceEvaluation.java hbase-server/src/test/java/org/apache/hadoop/hbase/ScanPerformanceEvaluation.java
index 0c331b7..5f242e0 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/ScanPerformanceEvaluation.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/ScanPerformanceEvaluation.java
@@ -29,7 +29,6 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
-import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
@@ -39,9 +38,7 @@ import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
 import org.apache.hadoop.hbase.mapreduce.TableMapper;
-import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.util.AbstractHBaseTool;
-import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.io.NullWritable;
 import org.apache.hadoop.mapreduce.Counters;
@@ -137,7 +134,7 @@ public class ScanPerformanceEvaluation extends AbstractHBaseTool {
     Scan scan = new Scan(); // default scan settings
     scan.setCacheBlocks(false);
     scan.setMaxVersions(1);
-    scan.setAttribute(Scan.SCAN_ATTRIBUTES_METRICS_ENABLE, Bytes.toBytes(Boolean.TRUE));
+    scan.setMetricsEnabled(true);
     if (caching != null) {
       scan.setCaching(Integer.parseInt(caching));
     }
@@ -177,7 +174,7 @@ public class ScanPerformanceEvaluation extends AbstractHBaseTool {
     table.close();
     connection.close();
 
-    ScanMetrics metrics = ProtobufUtil.toScanMetrics(scan.getAttribute(Scan.SCAN_ATTRIBUTES_METRICS_DATA));
+    ScanMetrics metrics = scanner.getScanMetrics();
     long totalBytes = metrics.countOfBytesInResults.get();
     double throughput = (double)totalBytes / scanTimer.elapsedTime(TimeUnit.SECONDS);
     double throughputRows = (double)numRows / scanTimer.elapsedTime(TimeUnit.SECONDS);
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
index c77ab29..17b2196 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
@@ -5003,28 +5003,30 @@ public class TestFromClientSide {
     Scan scan1 = new Scan();
     int numRecords = 0;
-    for(Result result : ht.getScanner(scan1)) {
+    ResultScanner scanner = ht.getScanner(scan1);
+    for (Result result : scanner) {
       numRecords++;
     }
+    scanner.close();
     LOG.info("test data has " + numRecords + " records.");
 
     // by default, scan metrics collection is turned off
-    assertEquals(null, scan1.getAttribute(Scan.SCAN_ATTRIBUTES_METRICS_DATA));
+    assertEquals(null, scanner.getScanMetrics());
 
     // turn on scan metrics
     Scan scan = new Scan();
-    scan.setAttribute(Scan.SCAN_ATTRIBUTES_METRICS_ENABLE, Bytes.toBytes(Boolean.TRUE));
+    scan.setMetricsEnabled(true);
     scan.setCaching(numRecords+1);
-    ResultScanner scanner = ht.getScanner(scan);
+    scanner = ht.getScanner(scan);
     for (Result result : scanner.next(numRecords - 1)) {
     }
     scanner.close();
     // closing the scanner will set the metrics.
-    assertNotNull(scan.getAttribute(Scan.SCAN_ATTRIBUTES_METRICS_DATA));
+    assertNotNull(scanner.getScanMetrics());
 
     // set caching to 1, because metrics are collected in each roundtrip only
     scan = new Scan();
-    scan.setAttribute(Scan.SCAN_ATTRIBUTES_METRICS_ENABLE, Bytes.toBytes(Boolean.TRUE));
+    scan.setMetricsEnabled(true);
     scan.setCaching(1);
     scanner = ht.getScanner(scan);
     // per HBASE-5717, this should still collect even if you don't run all the way to
@@ -5041,7 +5043,7 @@ public class TestFromClientSide {
     // run past the end of all the records
     Scan scanWithoutClose = new Scan();
     scanWithoutClose.setCaching(1);
-    scanWithoutClose.setAttribute(Scan.SCAN_ATTRIBUTES_METRICS_ENABLE, Bytes.toBytes(Boolean.TRUE));
+    scanWithoutClose.setMetricsEnabled(true);
     ResultScanner scannerWithoutClose = ht.getScanner(scanWithoutClose);
     for (Result result : scannerWithoutClose.next(numRecords + 1)) {
     }
@@ -5054,7 +5056,7 @@ public class TestFromClientSide {
     Scan scanWithClose = new Scan();
     // make sure we can set caching up to the number of a scanned values
     scanWithClose.setCaching(numRecords);
-    scanWithClose.setAttribute(Scan.SCAN_ATTRIBUTES_METRICS_ENABLE, Bytes.toBytes(Boolean.TRUE));
+    scanWithClose.setMetricsEnabled(true);
     ResultScanner scannerWithClose = ht.getScanner(scanWithClose);
     for (Result result : scannerWithClose.next(numRecords + 1)) {
     }
@@ -5068,7 +5070,5 @@ public class TestFromClientSide {
     byte[] serializedMetrics = scan.getAttribute(Scan.SCAN_ATTRIBUTES_METRICS_DATA);
     assertTrue("Serialized metrics were not found.", serializedMetrics != null);
 
-    ScanMetrics scanMetrics = ProtobufUtil.toScanMetrics(serializedMetrics);
-
-    return scanMetrics;
+    return ProtobufUtil.toScanMetrics(serializedMetrics);
   }
@@ -5209,10 +5210,10 @@ public class TestFromClientSide {
     // Verify region location before move.
     HRegionLocation addrCache = table.getRegionLocation(regionInfo.getStartKey(), false);
     HRegionLocation addrNoCache = table.getRegionLocation(regionInfo.getStartKey(), true);
-    
+
     assertEquals(addrBefore.getPort(), addrCache.getPort());
     assertEquals(addrBefore.getPort(), addrNoCache.getPort());
-    
+
     ServerName addrAfter = null;
     // Now move the region to a different server.
     for (int i = 0; i < SLAVES; i++) {
@@ -5227,7 +5228,7 @@ public class TestFromClientSide {
         break;
       }
     }
-    
+
     // Verify the region was moved.
     addrCache = table.getRegionLocation(regionInfo.getStartKey(), false);
     addrNoCache = table.getRegionLocation(regionInfo.getStartKey(), true);
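The assertions above lean on two behaviors called out in the test comments: metrics are accumulated once per RPC round trip (hence caching of 1), and closing the scanner publishes the final counts, while per HBASE-5717 even a scan that stops early still collects. A condensed sketch of that interplay; the table setup is omitted and assumed:

    Scan scan = new Scan();
    scan.setMetricsEnabled(true);
    scan.setCaching(1);  // one row per round trip, so counters advance on each next()
    ResultScanner scanner = table.getScanner(scan);
    scanner.next();      // a partial scan still contributes (HBASE-5717)
    scanner.close();     // close() publishes the final region/RPC counts
    ScanMetrics metrics = scanner.getScanMetrics();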
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannersFromClientSide.java hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannersFromClientSide.java
index a6c1cfe..4e86dd5 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannersFromClientSide.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannersFromClientSide.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.client;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
+import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
 
@@ -32,6 +33,7 @@ import org.apache.hadoop.hbase.HTestConst;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.MiniHBaseCluster;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
 import org.apache.hadoop.hbase.filter.ColumnPrefixFilter;
 import org.apache.hadoop.hbase.filter.ColumnRangeFilter;
 import org.apache.hadoop.hbase.master.HMaster;
@@ -96,6 +98,26 @@ public class TestScannersFromClientSide {
   }
 
   /**
+   * Test that scan metrics are available.
+   * @throws IOException
+   */
+  @Test (timeout=30000)
+  public void testScanMetrics() throws IOException {
+    TableName TABLE = TableName.valueOf("testScanMetrics");
+    try (Table ht = TEST_UTIL.createTable(TABLE, FAMILY)) {
+      Scan scan = new Scan();
+      scan.setMetricsEnabled(true);
+      try (ResultScanner scanner = ht.getScanner(scan)) {
+        while (scanner.next() != null) continue;
+        // Just make sure it basically works.
+        ScanMetrics metrics = scanner.getScanMetrics();
+        assertTrue(metrics != null);
+        assertEquals(1, metrics.countOfRegions.get());
+      }
+    }
+  }
+
+  /**
   * Test from client side for batch of scan
   *
   * @throws Exception