diff --git hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/io/MetricsIOSource.java hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/io/MetricsIOSource.java
new file mode 100644
index 0000000..d479f8c
--- /dev/null
+++ hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/io/MetricsIOSource.java
@@ -0,0 +1,87 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.io;
+
+import org.apache.hadoop.hbase.metrics.BaseSource;
+
+public interface MetricsIOSource extends BaseSource {
+
+  /**
+   * The name of the metrics
+   */
+  String METRICS_NAME = "IO";
+
+  /**
+   * The name of the metrics context that metrics will be under.
+   */
+  String METRICS_CONTEXT = "regionserver";
+
+  /**
+   * Description
+   */
+  String METRICS_DESCRIPTION = "Metrics about FileSystem IO";
+
+  /**
+   * The name of the metrics context that metrics will be under in jmx
+   */
+  String METRICS_JMX_CONTEXT = "RegionServer,sub=" + METRICS_NAME;
+
+  String FS_READ_COUNT_KEY = "fsReadCount";
+  String FS_PREAD_COUNT_KEY = "fsPReadCount";
+  String FS_WRITE_COUNT_KEY = "fsWriteCount";
+
+  String FS_READ_TIME_HISTO_KEY = "fsReadTime";
+  String FS_PREAD_TIME_HISTO_KEY = "fsPReadTime";
+  String FS_WRITE_HISTO_KEY = "fsWriteTime";
+
+  String CHECKSUM_FAILURES_KEY = "fsChecksumFailureCount";
+
+  String FS_READ_COUNT_DESC = "Number of HFile's sequential reads on this region server";
+  String FS_PREAD_COUNT_DESC = "Number of HFile's positional reads on this region server";
+  String FS_WRITE_COUNT_DESC = "Number of HFile's writes on this region server";
+
+  String FS_READ_TIME_HISTO_DESC
+    = "Latency of HFile's sequential reads on this region server in nanoseconds";
+  String FS_PREAD_TIME_HISTO_DESC
+    = "Latency of HFile's positional reads on this region server in nanoseconds";
+  String FS_WRITE_TIME_HISTO_DESC
+    = "Latency of HFile's writes on this region server in nanoseconds";
+
+  String CHECKSUM_FAILURES_DESC = "Number of checksum failures for the HBase HFile checksums at the"
+      + " HBase level (separate from HDFS checksums)";
+
+
+  /**
+   * Update the fs sequential read time histogram
+   * @param t time it took, in nanoseconds
+   */
+  void updateFsReadTime(long t);
+
+  /**
+   * Update the fs positional read time histogram
+   * @param t time it took, in nanoseconds
+   */
+  void updateFsPReadTime(long t);
+
+  /**
+   * Update the fs write time histogram
+   * @param t time it took, in nanoseconds
+   */
+  void updateFsWriteTime(long t);
+}
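
Note on consumption: with METRICS_JMX_CONTEXT set to "RegionServer,sub=IO", the standard Hadoop metrics2 JMX adapter should publish these values under a bean named along the lines of Hadoop:service=HBase,name=RegionServer,sub=IO. A minimal probe for trying the patch out from inside the region server JVM; the bean and attribute names are inferred from the keys above and from metrics2 conventions (histograms typically fan out into suffixed attributes such as _num_ops), not taken from this patch:

    import java.lang.management.ManagementFactory;
    import javax.management.MBeanServer;
    import javax.management.ObjectName;

    public class IOMetricsJmxProbe {
      public static void main(String[] args) throws Exception {
        MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
        // METRICS_JMX_CONTEXT ("RegionServer,sub=IO") maps to this bean name.
        ObjectName bean = new ObjectName("Hadoop:service=HBase,name=RegionServer,sub=IO");
        // Plain counters keep the *_KEY names verbatim.
        System.out.println("fsReadCount = " + mbs.getAttribute(bean, "fsReadCount"));
        // Time histograms fan out; the sample count is usually <key>_num_ops.
        System.out.println("fsReadTime samples = " + mbs.getAttribute(bean, "fsReadTime_num_ops"));
      }
    }
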
diff --git hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/io/MetricsIOWrapper.java hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/io/MetricsIOWrapper.java
new file mode 100644
index 0000000..3a5277f
--- /dev/null
+++ hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/io/MetricsIOWrapper.java
@@ -0,0 +1,30 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.io;
+
+public interface MetricsIOWrapper {
+
+  long getChecksumFailures();
+
+  long getReadOps();
+
+  long getPreadOps();
+
+  long getWriteOps();
+}
diff --git hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceFactory.java hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceFactory.java
index bb44946..2de873d 100644
--- hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceFactory.java
+++ hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceFactory.java
@@ -18,6 +18,9 @@
 
 package org.apache.hadoop.hbase.regionserver;
 
+import org.apache.hadoop.hbase.io.MetricsIOSource;
+import org.apache.hadoop.hbase.io.MetricsIOWrapper;
+
 /**
  * Interface of a factory to create Metrics Sources used inside of regionservers.
  */
@@ -38,4 +41,11 @@ public interface MetricsRegionServerSourceFactory {
    * @return A metrics region source
    */
   MetricsRegionSource createRegion(MetricsRegionWrapper wrapper);
+
+  /**
+   * Create a MetricsIOSource from a MetricsIOWrapper.
+   *
+   * @return A metrics IO source
+   */
+  MetricsIOSource createIO(MetricsIOWrapper wrapper);
 }
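
For orientation: nothing instantiates this factory by name. Callers go through CompatibilitySingletonFactory, which (as I understand it) resolves the concrete hadoop2-compat implementation via ServiceLoader metadata, and the MetricsIO class added later in this patch is the sole production caller. Its convenience constructor effectively does the following (MetricsIOWrapperImpl is the hbase-server implementation that also appears later in this patch):

    import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
    import org.apache.hadoop.hbase.io.MetricsIOSource;
    import org.apache.hadoop.hbase.io.MetricsIOWrapperImpl;
    import org.apache.hadoop.hbase.regionserver.MetricsRegionServerSourceFactory;

    public class IOSourceWiringSketch {
      static MetricsIOSource wire() {
        // The concrete factory (from hbase-hadoop2-compat) is discovered at runtime.
        return CompatibilitySingletonFactory
            .getInstance(MetricsRegionServerSourceFactory.class)
            .createIO(new MetricsIOWrapperImpl());
      }
    }
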
diff --git hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/io/MetricsIOSourceImpl.java hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/io/MetricsIOSourceImpl.java
new file mode 100644
index 0000000..114fad1
--- /dev/null
+++ hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/io/MetricsIOSourceImpl.java
@@ -0,0 +1,90 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.io;
+
+import org.apache.hadoop.hbase.metrics.BaseSourceImpl;
+import org.apache.hadoop.metrics2.MetricHistogram;
+import org.apache.hadoop.metrics2.MetricsCollector;
+import org.apache.hadoop.metrics2.MetricsRecordBuilder;
+import org.apache.hadoop.metrics2.lib.Interns;
+
+public class MetricsIOSourceImpl extends BaseSourceImpl implements MetricsIOSource {
+
+  private final MetricsIOWrapper wrapper;
+
+  private final MetricHistogram fsReadTimeHisto;
+  private final MetricHistogram fsPReadTimeHisto;
+  private final MetricHistogram fsWriteTimeHisto;
+
+  public MetricsIOSourceImpl(MetricsIOWrapper wrapper) {
+    this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT, wrapper);
+  }
+
+  public MetricsIOSourceImpl(String metricsName,
+      String metricsDescription,
+      String metricsContext,
+      String metricsJmxContext,
+      MetricsIOWrapper wrapper) {
+    super(metricsName, metricsDescription, metricsContext, metricsJmxContext);
+
+    this.wrapper = wrapper;
+
+    fsReadTimeHisto = getMetricsRegistry()
+        .newTimeHistogram(FS_READ_TIME_HISTO_KEY, FS_READ_TIME_HISTO_DESC);
+    fsPReadTimeHisto = getMetricsRegistry()
+        .newTimeHistogram(FS_PREAD_TIME_HISTO_KEY, FS_PREAD_TIME_HISTO_DESC);
+    fsWriteTimeHisto = getMetricsRegistry()
+        .newTimeHistogram(FS_WRITE_HISTO_KEY, FS_WRITE_TIME_HISTO_DESC);
+  }
+
+  @Override
+  public void updateFsReadTime(long t) {
+    fsReadTimeHisto.add(t);
+  }
+
+  @Override
+  public void updateFsPReadTime(long t) {
+    fsPReadTimeHisto.add(t);
+  }
+
+  @Override
+  public void updateFsWriteTime(long t) {
+    fsWriteTimeHisto.add(t);
+  }
+
+  @Override
+  public void getMetrics(MetricsCollector metricsCollector, boolean all) {
+    MetricsRecordBuilder mrb = metricsCollector.addRecord(metricsName);
+
+    // wrapper can be null because this function is called inside of init.
+    if (wrapper != null) {
+      mrb.addCounter(Interns.info(FS_READ_COUNT_KEY, FS_READ_COUNT_DESC),
+        wrapper.getReadOps());
+      mrb.addCounter(Interns.info(FS_PREAD_COUNT_KEY, FS_PREAD_COUNT_DESC),
+        wrapper.getPreadOps());
+      mrb.addCounter(Interns.info(FS_WRITE_COUNT_KEY, FS_WRITE_COUNT_DESC),
+        wrapper.getWriteOps());
+      mrb.addCounter(Interns.info(CHECKSUM_FAILURES_KEY, CHECKSUM_FAILURES_DESC),
+        wrapper.getChecksumFailures());
+    }
+
+    metricsRegistry.snapshot(mrb, all);
+  }
+
+}
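
Two flows meet in this class: the updateFs*Time() methods push samples into the histograms as I/O completes, while the four counters are pulled from the wrapper only when getMetrics() snapshots the record (hence the null guard; per the inline comment, registration in the super constructor can trigger a first collection before this.wrapper is assigned). A push-side sketch, using a throwaway wrapper purely for illustration:

    // Illustrative only; production code reaches this class via the factory.
    MetricsIOWrapper noopWrapper = new MetricsIOWrapper() {
      @Override public long getChecksumFailures() { return 0; }
      @Override public long getReadOps() { return 0; }
      @Override public long getPreadOps() { return 0; }
      @Override public long getWriteOps() { return 0; }
    };
    MetricsIOSourceImpl source = new MetricsIOSourceImpl(noopWrapper);
    source.updateFsPReadTime(250000L);  // a 0.25 ms positional read, in nanoseconds
    source.updateFsWriteTime(2000000L); // a 2 ms write
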
diff --git hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceFactoryImpl.java hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceFactoryImpl.java
index c483083..666788a 100644
--- hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceFactoryImpl.java
+++ hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceFactoryImpl.java
@@ -19,6 +19,9 @@
 package org.apache.hadoop.hbase.regionserver;
 
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.io.MetricsIOSource;
+import org.apache.hadoop.hbase.io.MetricsIOSourceImpl;
+import org.apache.hadoop.hbase.io.MetricsIOWrapper;
 
 /**
  * Factory to create MetricsRegionServerSource when given a MetricsRegionServerWrapper
@@ -50,4 +53,9 @@ public class MetricsRegionServerSourceFactoryImpl implements MetricsRegionServerSourceFactory {
   public MetricsRegionSource createRegion(MetricsRegionWrapper wrapper) {
     return new MetricsRegionSourceImpl(wrapper, getAggregate());
   }
+
+  @Override
+  public MetricsIOSource createIO(MetricsIOWrapper wrapper) {
+    return new MetricsIOSourceImpl(wrapper);
+  }
 }
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/io/MetricsIO.java hbase-server/src/main/java/org/apache/hadoop/hbase/io/MetricsIO.java
new file mode 100644
index 0000000..cb0b209
--- /dev/null
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/io/MetricsIO.java
@@ -0,0 +1,61 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.io;
+
+import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
+import org.apache.hadoop.hbase.regionserver.MetricsRegionServerSourceFactory;
+
+import com.google.common.annotations.VisibleForTesting;
+
+public class MetricsIO {
+
+  private final MetricsIOSource source;
+  private final MetricsIOWrapper wrapper;
+
+  public MetricsIO(MetricsIOWrapper wrapper) {
+    this(CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class)
+            .createIO(wrapper), wrapper);
+  }
+
+  MetricsIO(MetricsIOSource source, MetricsIOWrapper wrapper) {
+    this.source = source;
+    this.wrapper = wrapper;
+  }
+
+  @VisibleForTesting
+  public MetricsIOSource getMetricsSource() {
+    return source;
+  }
+
+  @VisibleForTesting
+  public MetricsIOWrapper getWrapper() {
+    return wrapper;
+  }
+
+  public void updateFsReadTime(long t) {
+    source.updateFsReadTime(t);
+  }
+
+  public void updateFsPreadTime(long t) {
+    source.updateFsPReadTime(t);
+  }
+
+  public void updateFsWriteTime(long t) {
+    source.updateFsWriteTime(t);
+  }
+}
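
MetricsIO is the facade the I/O path calls; the pattern this patch applies at each read/write site (see HFile and HFileBlock below) is to bracket the filesystem call with System.nanoTime() and report the delta. A minimal sketch of that pattern, where metrics and out stand in for the real fields at a call site:

    static void timedWrite(MetricsIO metrics, java.io.OutputStream out, byte[] block)
        throws java.io.IOException {
      long startTime = System.nanoTime();
      out.write(block); // the actual filesystem write
      metrics.updateFsWriteTime(System.nanoTime() - startTime); // delta in nanoseconds
    }
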
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/io/MetricsIOWrapperImpl.java hbase-server/src/main/java/org/apache/hadoop/hbase/io/MetricsIOWrapperImpl.java
new file mode 100644
index 0000000..fbb3997
--- /dev/null
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/io/MetricsIOWrapperImpl.java
@@ -0,0 +1,45 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.io;
+
+import org.apache.hadoop.hbase.io.hfile.HFile;
+
+public class MetricsIOWrapperImpl implements MetricsIOWrapper {
+
+  @Override
+  public long getChecksumFailures() {
+    return HFile.getChecksumFailuresCount();
+  }
+
+  @Override
+  public long getReadOps() {
+    return HFile.getReadOps();
+  }
+
+  @Override
+  public long getPreadOps() {
+    return HFile.getPreadOps();
+  }
+
+  @Override
+  public long getWriteOps() {
+    return HFile.getWriteOps();
+  }
+
+}
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
index 8582dbe..ce0ca37 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
@@ -34,7 +34,6 @@ import java.util.Map;
 import java.util.Set;
 import java.util.SortedMap;
 import java.util.TreeMap;
-
 import org.apache.hadoop.hbase.util.ByteStringer;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -51,6 +50,8 @@ import org.apache.hadoop.hbase.CellComparator;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.fs.HFileSystem;
 import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
+import org.apache.hadoop.hbase.io.MetricsIO;
+import org.apache.hadoop.hbase.io.MetricsIOWrapperImpl;
 import org.apache.hadoop.hbase.io.compress.Compression;
 import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
 import org.apache.hadoop.hbase.protobuf.ProtobufMagic;
@@ -185,16 +186,58 @@ public class HFile {
   // For tests. Gets incremented when we read a block whether from HDFS or from Cache.
   public static final Counter DATABLOCK_READ_COUNT = new Counter();
 
+  /** Static instance for the metrics so that HFileReaders access the same instance */
+  static final MetricsIO metrics = new MetricsIO(new MetricsIOWrapperImpl());
+
+  private static final Counter readOps = new Counter();
+  private static final Counter preadOps = new Counter();
+  private static final Counter writeOps = new Counter();
+
+  /**
+   * Number of checksum verification failures.
+   */
+  public static final long getChecksumFailuresCount() {
+    return CHECKSUM_FAILURES.get();
+  }
+
   /**
    * Number of checksum verification failures. It also
    * clears the counter.
    */
+  @VisibleForTesting
-  public static final long getChecksumFailuresCount() {
+  public static final long getAndResetChecksumFailuresCount() {
     long count = CHECKSUM_FAILURES.get();
     CHECKSUM_FAILURES.set(0);
     return count;
   }
 
+  public static final void updateReadLatency(long latencyNanos, boolean pread) {
+    if (pread) {
+      preadOps.increment();
+      metrics.updateFsPreadTime(latencyNanos);
+    } else {
+      readOps.increment();
+      metrics.updateFsReadTime(latencyNanos);
+    }
+  }
+
+  public static final void updateWriteLatency(long latencyNanos) {
+    writeOps.increment();
+    metrics.updateFsWriteTime(latencyNanos);
+  }
+
+  public static long getReadOps() {
+    return readOps.get();
+  }
+
+  public static long getPreadOps() {
+    return preadOps.get();
+  }
+
+  public static long getWriteOps() {
+    return writeOps.get();
+  }
+
   /** API required to write an {@link HFile} */
   public interface Writer extends Closeable {
     /** Max memstore (mvcc) timestamp in FileInfo */
@@ -250,7 +295,7 @@ public class HFile {
     protected FileSystem fs;
     protected Path path;
     protected FSDataOutputStream ostream;
-    protected CellComparator comparator = 
+    protected CellComparator comparator =
         CellComparator.COMPARATOR;
     protected InetSocketAddress[] favoredNodes;
     private HFileContext fileContext;
@@ -461,9 +506,9 @@ public class HFile {
    * Return the file context of the HFile this reader belongs to
    */
   HFileContext getFileContext();
-  
+
   boolean isPrimaryReplicaReader();
-  
+
   void setPrimaryReplicaReader(boolean isPrimaryReplicaReader);
 
   boolean shouldIncludeMemstoreTS();
@@ -657,82 +702,102 @@ public class HFile {
       return this;
     }
 
+    @Override
     public void clear() {
       this.map.clear();
     }
 
+    @Override
     public Comparator<? super byte[]> comparator() {
       return map.comparator();
     }
 
+    @Override
     public boolean containsKey(Object key) {
       return map.containsKey(key);
     }
 
+    @Override
     public boolean containsValue(Object value) {
       return map.containsValue(value);
     }
 
+    @Override
     public Set<java.util.Map.Entry<byte[], byte[]>> entrySet() {
       return map.entrySet();
     }
 
+    @Override
     public boolean equals(Object o) {
       return map.equals(o);
     }
 
+    @Override
     public byte[] firstKey() {
       return map.firstKey();
     }
 
+    @Override
     public byte[] get(Object key) {
       return map.get(key);
     }
 
+    @Override
     public int hashCode() {
       return map.hashCode();
    }
 
+    @Override
     public SortedMap<byte[], byte[]> headMap(byte[] toKey) {
       return this.map.headMap(toKey);
     }
 
+    @Override
     public boolean isEmpty() {
       return map.isEmpty();
     }
 
+    @Override
     public Set<byte[]> keySet() {
       return map.keySet();
     }
 
+    @Override
     public byte[] lastKey() {
       return map.lastKey();
     }
 
+    @Override
     public byte[] put(byte[] key, byte[] value) {
       return this.map.put(key, value);
     }
 
+    @Override
     public void putAll(Map<? extends byte[], ? extends byte[]> m) {
       this.map.putAll(m);
     }
 
+    @Override
     public byte[] remove(Object key) {
       return this.map.remove(key);
     }
 
+    @Override
     public int size() {
       return map.size();
     }
 
+    @Override
     public SortedMap<byte[], byte[]> subMap(byte[] fromKey, byte[] toKey) {
       return this.map.subMap(fromKey, toKey);
     }
 
+    @Override
     public SortedMap<byte[], byte[]> tailMap(byte[] fromKey) {
       return this.map.tailMap(fromKey);
     }
 
+    @Override
     public Collection<byte[]> values() {
       return map.values();
     }
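
A contract worth making explicit, since the metrics path and the test path want different semantics from CHECKSUM_FAILURES: getChecksumFailuresCount() must be non-destructive, because MetricsIOWrapperImpl polls it on every collection cycle and the metrics system expects a monotonically increasing counter, whereas tests use getAndResetChecksumFailuresCount() to get a fresh window per assertion. In sketch form (values hypothetical):

    // Metrics polling must not perturb the counter:
    long first = HFile.getChecksumFailuresCount();
    long second = HFile.getChecksumFailuresCount();
    assert second >= first;

    // Tests read-and-clear, so each assertion sees only new failures:
    HFile.getAndResetChecksumFailuresCount(); // drain whatever accumulated
    // ... provoke exactly one checksum failure ...
    assert HFile.getAndResetChecksumFailuresCount() == 1;
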
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
index f3402da..a4603f6 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
@@ -233,6 +233,7 @@ public class HFileBlock implements Cacheable {
    */
   static final CacheableDeserializer<Cacheable> BLOCK_DESERIALIZER =
       new CacheableDeserializer<Cacheable>() {
+        @Override
         public HFileBlock deserialize(ByteBuff buf, boolean reuse, MemoryType memType)
             throws IOException {
           // The buf has the file block followed by block metadata.
@@ -403,6 +404,7 @@ public class HFileBlock implements Cacheable {
     return nextBlockOnDiskSize;
   }
 
+  @Override
   public BlockType getBlockType() {
     return blockType;
   }
@@ -1076,8 +1078,10 @@ public class HFileBlock implements Cacheable {
     protected void finishBlockAndWriteHeaderAndData(DataOutputStream out)
       throws IOException {
       ensureBlockReady();
+      long startTime = System.nanoTime();
       out.write(onDiskBlockBytesWithHeader);
       out.write(onDiskChecksum);
+      HFile.updateWriteLatency(System.nanoTime() - startTime);
     }
 
     /**
@@ -1415,6 +1419,7 @@ public class HFileBlock implements Cacheable {
       this(new FSDataInputStreamWrapper(istream), fileSize, null, null, fileContext);
     }
 
+    @Override
     public BlockIterator blockRange(final long startOffset, final long endOffset) {
       final FSReader owner = this; // handle for inner class
       return new BlockIterator() {
@@ -1469,10 +1474,12 @@ public class HFileBlock implements Cacheable {
             "-byte array at offset " + destOffset);
       }
 
+      long startTimeNano = System.nanoTime();
       if (!pread && streamLock.tryLock()) {
         // Seek + read. Better for scanning.
         try {
           istream.seek(fileOffset);
+          // TODO: do we need seek time latencies?
 
           long realOffset = istream.getPos();
           if (realOffset != fileOffset) {
@@ -1483,11 +1490,13 @@ public class HFileBlock implements Cacheable {
 
           if (!peekIntoNextBlock) {
             IOUtils.readFully(istream, dest, destOffset, size);
+            HFile.updateReadLatency(System.nanoTime() - startTimeNano, false);
             return -1;
           }
 
           // Try to read the next block header.
           if (!readWithExtra(istream, dest, destOffset, size, hdrSize)) {
+            HFile.updateReadLatency(System.nanoTime() - startTimeNano, false);
             return -1;
           }
         } finally {
@@ -1497,6 +1506,7 @@ public class HFileBlock implements Cacheable {
         // Positional read. Better for random reads; or when the streamLock is already locked.
         int extraSize = peekIntoNextBlock ? hdrSize : 0;
         if (!positionalReadWithExtra(istream, fileOffset, dest, destOffset, size, extraSize)) {
+          HFile.updateReadLatency(System.nanoTime() - startTimeNano, true);
          return -1;
        }
      }
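
In readAtOffset the latency updates sit at individual return sites inside the branches, so a read that exits via an exception is not recorded at the sites visible in these hunks. If covering those exits ever matters, a try/finally shape is the usual alternative; shown purely as a comparison sketch, not as part of the patch (readFully stands in for the branch logic):

    // Comparison sketch only: time every exit, including exceptional ones.
    static int timedRead(org.apache.hadoop.fs.FSDataInputStream istream,
        byte[] dest, int destOffset, int size, boolean pread) throws java.io.IOException {
      long startTimeNano = System.nanoTime();
      try {
        org.apache.hadoop.io.IOUtils.readFully(istream, dest, destOffset, size);
        return -1;
      } finally {
        HFile.updateReadLatency(System.nanoTime() - startTimeNano, pread);
      }
    }
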
   * @param left
@@ -757,11 +759,13 @@
     trailer.setEntryCount(entryCount);
     trailer.setCompressionCodec(hFileContext.getCompression());
+    long startTime = System.nanoTime();
     trailer.serialize(outputStream);
+    HFile.updateWriteLatency(System.nanoTime() - startTime);
 
     if (closeOutputStream) {
       outputStream.close();
       outputStream = null;
     }
   }
 }
\ No newline at end of file
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestMetricsIO.java hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestMetricsIO.java
new file mode 100644
index 0000000..53c76e5
--- /dev/null
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestMetricsIO.java
@@ -0,0 +1,63 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.io;
+
+import org.apache.hadoop.hbase.CompatibilityFactory;
+import org.apache.hadoop.hbase.test.MetricsAssertHelper;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category(SmallTests.class)
+public class TestMetricsIO {
+
+  public MetricsAssertHelper HELPER = CompatibilityFactory.getInstance(MetricsAssertHelper.class);
+
+  @Test
+  public void testMetrics() {
+    MetricsIO metrics = new MetricsIO(new MetricsIOWrapper() {
+      @Override
+      public long getWriteOps() { return 10; }
+      @Override
+      public long getReadOps() { return 20; }
+      @Override
+      public long getPreadOps() { return 30; }
+      @Override
+      public long getChecksumFailures() { return 40; }
+    });
+
+    metrics.updateFsReadTime(100);
+    metrics.updateFsReadTime(200);
+
+    metrics.updateFsPreadTime(300);
+
+    metrics.updateFsWriteTime(400);
+    metrics.updateFsWriteTime(500);
+    metrics.updateFsWriteTime(600);
+
+    HELPER.assertCounter("fsWriteCount", 10, metrics.getMetricsSource());
+    HELPER.assertCounter("fsReadCount", 20, metrics.getMetricsSource());
+    HELPER.assertCounter("fsPReadCount", 30, metrics.getMetricsSource());
+    HELPER.assertCounter("fsChecksumFailureCount", 40, metrics.getMetricsSource());
+
+    HELPER.assertCounter("fsReadTime_numOps", 2, metrics.getMetricsSource());
+    HELPER.assertCounter("fsPReadTime_numOps", 1, metrics.getMetricsSource());
+    HELPER.assertCounter("fsWriteTime_numOps", 3, metrics.getMetricsSource());
+  }
+}
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestChecksum.java hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestChecksum.java
index d91a811..8ba6d46 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestChecksum.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestChecksum.java
@@ -139,7 +139,7 @@ public class TestChecksum {
         exception_thrown = true;
       }
       assertTrue(exception_thrown);
-      assertEquals(0, HFile.getChecksumFailuresCount());
+      assertEquals(0, HFile.getAndResetChecksumFailuresCount());
     }
   }
 
@@ -203,7 +203,7 @@ public class TestChecksum {
 
       // assert that we encountered hbase checksum verification failures
       // but still used hdfs checksums and read data successfully.
-      assertEquals(1, HFile.getChecksumFailuresCount());
+      assertEquals(1, HFile.getAndResetChecksumFailuresCount());
       validateData(in);
 
       // A single instance of hbase checksum failure causes the reader to
@@ -212,17 +212,17 @@ public class TestChecksum {
       for (int i = 0; i <
           HFileBlock.CHECKSUM_VERIFICATION_NUM_IO_THRESHOLD + 1; i++) {
         b = hbr.readBlockData(0, -1, pread);
-        assertEquals(0, HFile.getChecksumFailuresCount());
+        assertEquals(0, HFile.getAndResetChecksumFailuresCount());
       }
       // The next read should have hbase checksum verification reanabled,
       // we verify this by assertng that there was a hbase-checksum failure.
       b = hbr.readBlockData(0, -1, pread);
-      assertEquals(1, HFile.getChecksumFailuresCount());
+      assertEquals(1, HFile.getAndResetChecksumFailuresCount());
 
       // Since the above encountered a checksum failure, we switch
       // back to not checking hbase checksums.
       b = hbr.readBlockData(0, -1, pread);
-      assertEquals(0, HFile.getChecksumFailuresCount());
+      assertEquals(0, HFile.getAndResetChecksumFailuresCount());
 
       is.close();
       // Now, use a completely new reader. Switch off hbase checksums in
@@ -246,7 +246,7 @@ public class TestChecksum {
 
       // assert that we did not encounter hbase checksum verification failures
       // but still used hdfs checksums and read data successfully.
-      assertEquals(0, HFile.getChecksumFailuresCount());
+      assertEquals(0, HFile.getAndResetChecksumFailuresCount());
       validateData(in);
     }
   }
@@ -326,7 +326,7 @@ public class TestChecksum {
         expectedChunks * HFileBlock.CHECKSUM_SIZE);
 
       // assert that we did not encounter hbase checksum verification failures
-      assertEquals(0, HFile.getChecksumFailuresCount());
+      assertEquals(0, HFile.getAndResetChecksumFailuresCount());
     }
   }
 }
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.java hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.java
index eb87a0c..ec8abe6 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.java
@@ -322,7 +322,7 @@ public class TestHFileBlock {
         HFileBlock.FSReader hbr = new HFileBlock.FSReaderImpl(is, totalSize, meta);
         HFileBlock b = hbr.readBlockData(0, -1, pread);
         is.close();
-        assertEquals(0, HFile.getChecksumFailuresCount());
+        assertEquals(0, HFile.getAndResetChecksumFailuresCount());
 
         b.sanityCheck();
         assertEquals(4936, b.getUncompressedSizeWithoutHeader());
@@ -423,7 +423,7 @@ public class TestHFileBlock {
           int pos = 0;
           for (int blockId = 0; blockId < numBlocks; ++blockId) {
             blockFromHFile = hbr.readBlockData(pos, -1, pread);
-            assertEquals(0, HFile.getChecksumFailuresCount());
+            assertEquals(0, HFile.getAndResetChecksumFailuresCount());
             blockFromHFile.sanityCheck();
             pos += blockFromHFile.getOnDiskSizeWithHeader();
             assertEquals((int) encodedSizes.get(blockId),
@@ -587,7 +587,7 @@ public class TestHFileBlock {
             assertEquals(b.getBytesPerChecksum(), b2.getBytesPerChecksum());
             assertEquals(b.getOnDiskDataSizeWithHeader(),
                          b2.getOnDiskDataSizeWithHeader());
-            assertEquals(0, HFile.getChecksumFailuresCount());
+            assertEquals(0, HFile.getAndResetChecksumFailuresCount());
 
             curOffset += b.getOnDiskSizeWithHeader();
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileEncryption.java hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileEncryption.java
index 3264558..47936d3 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileEncryption.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileEncryption.java
@@ -100,7 +100,7 @@ public class TestHFileEncryption {
   private long readAndVerifyBlock(long pos, HFileContext ctx, HFileBlock.FSReaderImpl hbr, int size)
       throws IOException {
     HFileBlock b = hbr.readBlockData(pos, -1, false);
-    assertEquals(0, HFile.getChecksumFailuresCount());
+    assertEquals(0, HFile.getAndResetChecksumFailuresCount());
     b.sanityCheck();
     assertFalse(b.isUnpacked());
     b = b.unpack(ctx, hbr);