From 1d186503316884abc57c3dcedeab40a1ce3a2987 Mon Sep 17 00:00:00 2001
From: Elliott Clark
Date: Thu, 17 Dec 2015 23:13:15 -0800
Subject: [PATCH] HBASE-14282 Remove metrics2

---
 conf/hadoop-metrics2-hbase.properties              |  44 --
 hbase-assembly/pom.xml                             |   9 -
 .../src/main/assembly/hadoop-two-compat.xml        |   2 -
 hbase-assembly/src/main/assembly/src.xml           |   2 -
 hbase-hadoop-compat/pom.xml                        | 142 ----
 .../apache/hadoop/hbase/CompatibilityFactory.java  |  75 --
 .../hbase/CompatibilitySingletonFactory.java       |  88 ---
 .../hadoop/hbase/ipc/MetricsHBaseServerSource.java | 119 ----
 .../hbase/ipc/MetricsHBaseServerSourceFactory.java |  57 --
 .../hbase/ipc/MetricsHBaseServerWrapper.java       |  29 -
 .../master/MetricsAssignmentManagerSource.java     |  75 --
 .../master/MetricsMasterFileSystemSource.java      |  64 --
 .../hbase/master/MetricsMasterProcSource.java      |  53 --
 .../master/MetricsMasterProcSourceFactory.java     |  28 -
 .../hadoop/hbase/master/MetricsMasterSource.java   |  86 ---
 .../hbase/master/MetricsMasterSourceFactory.java   |  28 -
 .../hadoop/hbase/master/MetricsMasterWrapper.java  | 115 ----
 .../hadoop/hbase/master/MetricsSnapshotSource.java |  56 --
 .../master/balancer/MetricsBalancerSource.java     |  51 --
 .../balancer/MetricsStochasticBalancerSource.java  |  39 --
 .../apache/hadoop/hbase/metrics/BaseSource.java    | 115 ----
 .../apache/hadoop/hbase/metrics/MBeanSource.java   |  38 --
 .../regionserver/MetricsRegionAggregateSource.java |  65 --
 .../regionserver/MetricsRegionServerSource.java    | 330 ---------
 .../MetricsRegionServerSourceFactory.java          |  41 --
 .../regionserver/MetricsRegionServerWrapper.java   | 370 ----------
 .../hbase/regionserver/MetricsRegionSource.java    |  85 ---
 .../hbase/regionserver/MetricsRegionWrapper.java   |  89 ---
 .../regionserver/wal/MetricsEditsReplaySource.java |  72 --
 .../hbase/regionserver/wal/MetricsWALSource.java   |  96 ---
 .../regionserver/MetricsReplicationSinkSource.java |  32 -
 .../regionserver/MetricsReplicationSource.java     |  48 --
 .../MetricsReplicationSourceFactory.java           |  25 -
 .../MetricsReplicationSourceSource.java            |  53 --
 .../hadoop/hbase/rest/MetricsRESTSource.java       | 116 ----
 .../hbase/thrift/MetricsThriftServerSource.java    |  78 ---
 .../thrift/MetricsThriftServerSourceFactory.java   |  37 -
 .../apache/hadoop/metrics2/MetricHistogram.java    |  44 --
 .../apache/hadoop/metrics2/MetricsExecutor.java    |  32 -
 .../java/org/apache/hadoop/hbase/HadoopShims.java  |  37 -
 .../apache/hadoop/hbase/RandomStringGenerator.java |  23 -
 .../hadoop/hbase/RandomStringGeneratorImpl.java    |  36 -
 .../hbase/TestCompatibilitySingletonFactory.java   |  89 ---
 .../master/TestMetricsMasterSourceFactory.java     |  40 --
 .../TestMetricsRegionServerSourceFactory.java      |  40 --
 .../regionserver/wal/TestMetricsWALSource.java     |  36 -
 .../TestMetricsReplicationSourceFactory.java       |  39 --
 .../hadoop/hbase/rest/TestMetricsRESTSource.java   |  41 --
 .../hadoop/hbase/test/MetricsAssertHelper.java     | 171 -----
 .../TestMetricsThriftServerSourceFactory.java      |  41 --
 .../org.apache.hadoop.hbase.RandomStringGenerator  |  18 -
 hbase-hadoop2-compat/pom.xml                       | 216 ------
 .../ipc/MetricsHBaseServerSourceFactoryImpl.java   |  62 --
 .../hbase/ipc/MetricsHBaseServerSourceImpl.java    | 227 -------
 .../org/apache/hadoop/hbase/mapreduce/JobUtil.java |  56 --
 .../master/MetricsAssignmentManagerSourceImpl.java |  74 --
 .../master/MetricsMasterFilesystemSourceImpl.java  |  73 --
 .../master/MetricsMasterProcSourceFactoryImpl.java |  38 --
 .../hbase/master/MetricsMasterProcSourceImpl.java  |  75 --
 .../master/MetricsMasterSourceFactoryImpl.java     |  40 --
 .../hbase/master/MetricsMasterSourceImpl.java      | 104 ---
 .../hbase/master/MetricsSnapshotSourceImpl.java    |  66 --
 .../master/balancer/MetricsBalancerSourceImpl.java |  59 --
 .../MetricsStochasticBalancerSourceImpl.java       | 110 ---
 .../hadoop/hbase/metrics/BaseSourceImpl.java       | 176 -----
 .../hadoop/hbase/metrics/MBeanSourceImpl.java      |  43 --
 .../MetricsRegionAggregateSourceImpl.java          | 112 ---
 .../MetricsRegionServerSourceFactoryImpl.java      |  53 --
 .../MetricsRegionServerSourceImpl.java             | 318 ---------
 .../regionserver/MetricsRegionSourceImpl.java      | 259 -------
 .../wal/MetricsEditsReplaySourceImpl.java          |  76 ---
 .../regionserver/wal/MetricsWALSourceImpl.java     | 106 ---
 .../MetricsReplicationGlobalSourceSource.java      | 124 ----
 .../MetricsReplicationSinkSourceImpl.java          |  59 --
 .../MetricsReplicationSourceFactoryImpl.java       |  38 --
 .../regionserver/MetricsReplicationSourceImpl.java |  45 --
 .../MetricsReplicationSourceSourceImpl.java        | 161 -----
 .../hadoop/hbase/rest/MetricsRESTSourceImpl.java   | 115 ----
 .../MetricsThriftServerSourceFactoryImpl.java      |  54 --
 .../thrift/MetricsThriftServerSourceImpl.java      |  99 ---
 .../hadoop/metrics2/impl/JmxCacheBuster.java       |  86 ---
 .../metrics2/lib/DefaultMetricsSystemHelper.java   |  55 --
 .../metrics2/lib/DynamicMetricsRegistry.java       | 609 -----------------
 .../metrics2/lib/MetricMutableQuantiles.java       | 154 -----
 .../hadoop/metrics2/lib/MetricsExecutorImpl.java   |  70 --
 .../hadoop/metrics2/lib/MutableHistogram.java      | 141 ----
 .../hadoop/metrics2/lib/MutableRangeHistogram.java |  94 ---
 .../hadoop/metrics2/lib/MutableSizeHistogram.java  |  57 --
 .../hadoop/metrics2/lib/MutableTimeHistogram.java  |  58 --
 .../hadoop/metrics2/util/MetricQuantile.java       |  60 --
 .../metrics2/util/MetricSampleQuantiles.java       | 310 ---------
 ...adoop.hbase.ipc.MetricsHBaseServerSourceFactory |  18 -
 ...oop.hbase.master.MetricsAssignmentManagerSource |  18 -
 ...doop.hbase.master.MetricsMasterFileSystemSource |  18 -
 ...oop.hbase.master.MetricsMasterProcSourceFactory |  18 -
 ....hadoop.hbase.master.MetricsMasterSourceFactory |  18 -
 ...pache.hadoop.hbase.master.MetricsSnapshotSource |  18 -
 ...oop.hbase.master.balancer.MetricsBalancerSource |  18 -
 ...master.balancer.MetricsStochasticBalancerSource |  18 -
 .../org.apache.hadoop.hbase.metrics.MBeanSource    |  18 -
 ...e.regionserver.MetricsRegionServerSourceFactory |  18 -
 ...hbase.regionserver.wal.MetricsEditsReplaySource |  18 -
 ....hadoop.hbase.regionserver.wal.MetricsWALSource |  18 -
 ...plication.regionserver.MetricsReplicationSource |  18 -
 ...on.regionserver.MetricsReplicationSourceFactory |  18 -
 .../org.apache.hadoop.hbase.rest.MetricsRESTSource |  18 -
 ...p.hbase.thrift.MetricsThriftServerSourceFactory |  18 -
 .../org.apache.hadoop.metrics2.MetricsExecutor     |  18 -
 .../org/apache/hadoop/hbase/HadoopShimsImpl.java   |  43 --
 .../master/TestMetricsMasterProcSourceImpl.java    |  46 --
 .../hbase/master/TestMetricsMasterSourceImpl.java  |  45 --
 .../hadoop/hbase/metrics/TestBaseSourceImpl.java   |  88 ---
 .../TestMetricsRegionServerSourceImpl.java         |  54 --
 .../regionserver/TestMetricsRegionSourceImpl.java  | 142 ----
 .../regionserver/wal/TestMetricsWALSourceImpl.java |  40 --
 .../TestMetricsReplicationSourceFactoryImpl.java   |  39 --
 .../TestMetricsReplicationSourceImpl.java          |  41 --
 .../hbase/rest/TestMetricsRESTSourceImpl.java      |  44 --
 .../hadoop/hbase/test/MetricsAssertHelperImpl.java | 250 -------
 .../TestMetricsThriftServerSourceFactoryImpl.java  |  59 --
 .../services/org.apache.hadoop.hbase.HadoopShims   |  18 -
 ...rg.apache.hadoop.hbase.test.MetricsAssertHelper |  18 -
 hbase-it/pom.xml                                   |   9 -
 .../IntegrationTestTableMapReduceUtil.java         |   1 -
 hbase-prefix-tree/pom.xml                          |   9 -
 hbase-rest/pom.xml                                 |   9 -
 .../org/apache/hadoop/hbase/rest/MetricsREST.java  | 103 ---
 .../apache/hadoop/hbase/rest/MultiRowResource.java |   4 -
 .../hbase/rest/NamespacesInstanceResource.java     |  26 +-
 .../hadoop/hbase/rest/NamespacesResource.java      |   3 -
 .../org/apache/hadoop/hbase/rest/RESTServlet.java  |   5 -
 .../apache/hadoop/hbase/rest/RegionsResource.java  |   4 -
 .../org/apache/hadoop/hbase/rest/RootResource.java |   3 -
 .../org/apache/hadoop/hbase/rest/RowResource.java  |  38 --
 .../hadoop/hbase/rest/ScannerInstanceResource.java |  26 +-
 .../apache/hadoop/hbase/rest/ScannerResource.java  |   6 -
 .../apache/hadoop/hbase/rest/SchemaResource.java   |  14 -
 .../hbase/rest/StorageClusterStatusResource.java   |   3 -
 .../hbase/rest/StorageClusterVersionResource.java  |   3 -
 .../apache/hadoop/hbase/rest/TableResource.java    |   1 -
 .../hadoop/hbase/rest/TableScanResource.java       |   5 -
 .../apache/hadoop/hbase/rest/VersionResource.java  |   2 -
 .../hadoop/hbase/rest/TestGetAndPutResource.java   |  33 -
 hbase-server/pom.xml                               |  22 -
 .../hbase/tmpl/regionserver/RSStatusTmpl.jamon     |   5 -
 .../hbase/tmpl/regionserver/RegionListTmpl.jamon   |   1 -
 .../tmpl/regionserver/ServerMetricsTmpl.jamon      | 199 ------
 .../hadoop/hbase/ipc/MetricsHBaseServer.java       | 118 ----
 .../hbase/ipc/MetricsHBaseServerWrapperImpl.java   |  81 ---
 .../org/apache/hadoop/hbase/ipc/RpcServer.java     |  29 -
 .../hadoop/hbase/ipc/RpcServerInterface.java       |   5 -
 .../org/apache/hadoop/hbase/mapreduce/JobUtil.java |  56 ++
 .../hadoop/hbase/mapreduce/TableMapReduceUtil.java |   1 -
 .../hadoop/hbase/master/AssignmentManager.java     | 663 +++++++++---------
 .../org/apache/hadoop/hbase/master/HMaster.java    |  12 +-
 .../hadoop/hbase/master/MasterFileSystem.java      |  10 -
 .../hadoop/hbase/master/MasterRpcServices.java     |   5 -
 .../hbase/master/MetricsAssignmentManager.java     |  63 --
 .../apache/hadoop/hbase/master/MetricsMaster.java  |  62 --
 .../hbase/master/MetricsMasterFileSystem.java      |  50 --
 .../hbase/master/MetricsMasterWrapperImpl.java     | 127 ----
 .../hadoop/hbase/master/MetricsSnapshot.java       |  54 --
 .../hbase/master/balancer/BaseLoadBalancer.java    |  22 +-
 .../hbase/master/balancer/MetricsBalancer.java     |  49 --
 .../master/balancer/MetricsStochasticBalancer.java |  71 --
 .../master/balancer/StochasticLoadBalancer.java    |  57 +-
 .../master/snapshot/CloneSnapshotHandler.java      |   3 -
 .../master/snapshot/RestoreSnapshotHandler.java    |   3 -
 .../hbase/master/snapshot/SnapshotManager.java     |   5 +-
 .../hbase/master/snapshot/TakeSnapshotHandler.java |   3 -
 .../hbase/procedure/MasterProcedureManager.java    |   3 +-
 .../procedure/MasterProcedureManagerHost.java      |   5 +-
 .../flush/MasterFlushTableProcedureManager.java    |   3 +-
 .../apache/hadoop/hbase/regionserver/HRegion.java  |  55 +-
 .../hadoop/hbase/regionserver/HRegionServer.java   |  23 +-
 .../hadoop/hbase/regionserver/MemStoreFlusher.java |   1 -
 .../hadoop/hbase/regionserver/MetricsRegion.java   |  76 ---
 .../hbase/regionserver/MetricsRegionServer.java    | 119 ----
 .../MetricsRegionServerWrapperImpl.java            | 751 ---------------------
 .../regionserver/MetricsRegionWrapperImpl.java     | 174 -----
 .../hadoop/hbase/regionserver/RSRpcServices.java   | 142 ++--
 .../apache/hadoop/hbase/regionserver/Region.java   |   2 -
 .../hadoop/hbase/regionserver/SplitRequest.java    |   3 -
 .../hadoop/hbase/regionserver/wal/MetricsWAL.java  |  75 --
 .../regionserver/wal/MetricsWALEditsReplay.java    |  59 --
 .../hbase/regionserver/wal/WALEditsReplaySink.java |   5 -
 .../hbase/replication/ReplicationEndpoint.java     |   7 -
 .../HBaseInterClusterReplicationEndpoint.java      |   8 +-
 .../replication/regionserver/MetricsSink.java      | 102 ---
 .../replication/regionserver/MetricsSource.java    | 228 -------
 .../RegionReplicaReplicationEndpoint.java          |   2 -
 .../replication/regionserver/Replication.java      |  42 +-
 .../replication/regionserver/ReplicationLoad.java  |  42 +-
 .../replication/regionserver/ReplicationSink.java  |  15 +-
 .../regionserver/ReplicationSource.java            |  29 +-
 .../regionserver/ReplicationSourceInterface.java   |   4 +-
 .../regionserver/ReplicationSourceManager.java     |   5 +-
 .../org/apache/hadoop/hbase/util/HBaseFsck.java    |   3 +-
 .../hadoop/hbase/wal/RegionGroupingProvider.java   |   3 +-
 .../org/apache/hadoop/hbase/wal/WALFactory.java    |   3 +-
 .../apache/hadoop/hbase/HBaseTestingUtility.java   |   3 +-
 .../org/apache/hadoop/hbase/MiniHBaseCluster.java  |   4 -
 .../hbase/TestStochasticBalancerJmxMetrics.java    | 277 --------
 .../hbase/client/TestMultiRespectsLimits.java      |  26 +-
 .../hbase/ipc/MetricsHBaseServerWrapperStub.java   |  51 --
 .../apache/hadoop/hbase/ipc/TestRpcMetrics.java    | 142 ----
 .../hbase/mapreduce/TestHFileOutputFormat2.java    |  13 +-
 .../hadoop/hbase/master/TestMasterMetrics.java     | 138 ----
 .../hbase/master/TestMasterMetricsWrapper.java     |  78 ---
 .../hbase/master/TestMasterStatusServlet.java      |   6 -
 .../hbase/master/snapshot/TestSnapshotManager.java |   4 +-
 .../procedure/SimpleMasterProcedureManager.java    |   3 +-
 .../MetricsRegionServerWrapperStub.java            | 362 ----------
 .../regionserver/MetricsRegionWrapperStub.java     | 110 ---
 .../hadoop/hbase/regionserver/TestHRegion.java     |  40 +-
 .../hbase/regionserver/TestMetricsRegion.java      |  70 --
 .../regionserver/TestMetricsRegionServer.java      | 137 ----
 .../hbase/regionserver/TestRSStatusServlet.java    |   4 -
 .../regionserver/TestRegionServerMetrics.java      | 558 ---------------
 .../regionserver/TestRemoveRegionMetrics.java      | 137 ----
 .../hbase/regionserver/wal/TestMetricsWAL.java     |  69 --
 .../hbase/replication/ReplicationSourceDummy.java  |   3 +-
 ...stRegionReplicaReplicationEndpointNoMaster.java |   2 -
 hbase-shell/pom.xml                                |   9 -
 hbase-spark/pom.xml                                | 155 -----
 hbase-testing-util/pom.xml                         |  24 -
 hbase-thrift/pom.xml                               |   9 -
 .../org/apache/hadoop/hbase/thrift/CallQueue.java  |   7 +-
 .../hbase/thrift/HbaseHandlerMetricsProxy.java     |   8 +-
 .../hbase/thrift/TBoundedThreadPoolServer.java     |   6 +-
 .../apache/hadoop/hbase/thrift/ThriftMetrics.java  |  90 ---
 .../hadoop/hbase/thrift/ThriftServerRunner.java    |  22 +-
 .../hbase/thrift2/ThriftHBaseServiceHandler.java   |  10 +-
 .../apache/hadoop/hbase/thrift2/ThriftServer.java  |  16 +-
 .../apache/hadoop/hbase/thrift/TestCallQueue.java  | 127 ----
 .../hadoop/hbase/thrift/TestThriftServer.java      |  67 --
 .../thrift2/TestThriftHBaseServiceHandler.java     |  41 --
 pom.xml                                            |  35 -
 238 files changed, 484 insertions(+), 15955 deletions(-)
 delete mode 100644 conf/hadoop-metrics2-hbase.properties
 delete mode 100644 hbase-hadoop-compat/pom.xml
 delete mode 100644 hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/CompatibilityFactory.java
 delete mode 100644 hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/CompatibilitySingletonFactory.java
 delete mode 100644 hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSource.java
 delete mode 100644 hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSourceFactory.java
 delete mode 100644 hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerWrapper.java
 delete mode 100644 hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsAssignmentManagerSource.java
 delete mode 100644 hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterFileSystemSource.java
 delete mode 100644 hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterProcSource.java
 delete mode 100644 hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterProcSourceFactory.java
 delete mode 100644 hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSource.java
 delete mode 100644 hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceFactory.java
 delete mode 100644 hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterWrapper.java
 delete mode 100644 hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsSnapshotSource.java
 delete mode 100644 hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsBalancerSource.java
 delete mode 100644 hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsStochasticBalancerSource.java
 delete mode 100644 hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseSource.java
 delete mode 100644 hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/MBeanSource.java
 delete mode 100644 hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionAggregateSource.java
 delete mode 100644 hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
 delete mode 100644 hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceFactory.java
 delete mode 100644 hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java
 delete mode 100644 hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSource.java
 delete mode 100644 hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.java
 delete mode 100644 hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsEditsReplaySource.java
 delete mode 100644 hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWALSource.java
 delete mode 100644 hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSinkSource.java
 delete mode 100644 hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSource.java
 delete mode 100644 hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceFactory.java
 delete mode 100644 hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceSource.java
 delete mode 100644 hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/rest/MetricsRESTSource.java
 delete mode 100644 hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSource.java
 delete mode 100644 hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceFactory.java
 delete mode 100644 hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/MetricHistogram.java
 delete mode 100644 hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/MetricsExecutor.java
 delete mode 100644 hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/HadoopShims.java
 delete mode 100644 hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/RandomStringGenerator.java
 delete mode 100644 hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/RandomStringGeneratorImpl.java
 delete mode 100644 hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/TestCompatibilitySingletonFactory.java
 delete mode 100644 hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/master/TestMetricsMasterSourceFactory.java
 delete mode 100644 hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServerSourceFactory.java
 delete mode 100644 hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestMetricsWALSource.java
 delete mode 100644 hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestMetricsReplicationSourceFactory.java
 delete mode 100644 hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/rest/TestMetricsRESTSource.java
 delete mode 100644 hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/test/MetricsAssertHelper.java
 delete mode 100644 hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/thrift/TestMetricsThriftServerSourceFactory.java
 delete mode 100644 hbase-hadoop-compat/src/test/resources/META-INF/services/org.apache.hadoop.hbase.RandomStringGenerator
 delete mode 100644 hbase-hadoop2-compat/pom.xml
 delete mode 100644 hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSourceFactoryImpl.java
 delete mode 100644 hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSourceImpl.java
 delete mode 100644 hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/mapreduce/JobUtil.java
 delete mode 100644 hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsAssignmentManagerSourceImpl.java
 delete mode 100644 hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterFilesystemSourceImpl.java
 delete mode 100644 hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterProcSourceFactoryImpl.java
 delete mode 100644 hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterProcSourceImpl.java
 delete mode 100644 hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceFactoryImpl.java
 delete mode 100644 hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceImpl.java
 delete mode 100644 hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsSnapshotSourceImpl.java
 delete mode 100644 hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsBalancerSourceImpl.java
 delete mode 100644 hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsStochasticBalancerSourceImpl.java
 delete mode 100644 hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseSourceImpl.java
 delete mode 100644 hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/metrics/MBeanSourceImpl.java
 delete mode 100644 hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionAggregateSourceImpl.java
 delete mode 100644 hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceFactoryImpl.java
 delete mode 100644 hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java
 delete mode 100644 hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java
 delete mode 100644 hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsEditsReplaySourceImpl.java
 delete mode 100644 hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWALSourceImpl.java
 delete mode 100644 hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationGlobalSourceSource.java
 delete mode 100644 hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSinkSourceImpl.java
 delete mode 100644 hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceFactoryImpl.java
 delete mode 100644 hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceImpl.java
 delete mode 100644 hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceSourceImpl.java
 delete mode 100644 hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/rest/MetricsRESTSourceImpl.java
 delete mode 100644 hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceFactoryImpl.java
 delete mode 100644 hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceImpl.java
 delete mode 100644 hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/impl/JmxCacheBuster.java
 delete mode 100644 hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/DefaultMetricsSystemHelper.java
 delete mode 100644 hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/DynamicMetricsRegistry.java
 delete mode 100644 hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MetricMutableQuantiles.java
 delete mode 100644 hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MetricsExecutorImpl.java
 delete mode 100644 hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableHistogram.java
 delete mode 100644 hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableRangeHistogram.java
 delete mode 100644 hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableSizeHistogram.java
 delete mode 100644 hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableTimeHistogram.java
 delete mode 100644 hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/util/MetricQuantile.java
 delete mode 100644 hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/util/MetricSampleQuantiles.java
 delete mode 100644 hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.ipc.MetricsHBaseServerSourceFactory
 delete mode 100644 hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.master.MetricsAssignmentManagerSource
 delete mode 100644 hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.master.MetricsMasterFileSystemSource
 delete mode 100644 hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.master.MetricsMasterProcSourceFactory
 delete mode 100644 hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.master.MetricsMasterSourceFactory
 delete mode 100644 hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.master.MetricsSnapshotSource
 delete mode 100644 hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.master.balancer.MetricsBalancerSource
 delete mode 100644 hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.master.balancer.MetricsStochasticBalancerSource
 delete mode 100644 hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.metrics.MBeanSource
 delete mode 100644 hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.regionserver.MetricsRegionServerSourceFactory
 delete mode 100644 hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.regionserver.wal.MetricsEditsReplaySource
 delete mode 100644 hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.regionserver.wal.MetricsWALSource
 delete mode 100644 hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSource
 delete mode 100644 hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSourceFactory
 delete mode 100644 hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.rest.MetricsRESTSource
 delete mode 100644 hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.thrift.MetricsThriftServerSourceFactory
 delete mode 100644 hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.metrics2.MetricsExecutor
 delete mode 100644 hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/HadoopShimsImpl.java
 delete mode 100644 hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/master/TestMetricsMasterProcSourceImpl.java
 delete mode 100644 hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/master/TestMetricsMasterSourceImpl.java
 delete mode 100644 hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/metrics/TestBaseSourceImpl.java
 delete mode 100644 hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServerSourceImpl.java
 delete mode 100644 hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionSourceImpl.java
 delete mode 100644 hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestMetricsWALSourceImpl.java
 delete mode 100644 hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestMetricsReplicationSourceFactoryImpl.java
 delete mode 100644 hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestMetricsReplicationSourceImpl.java
 delete mode 100644 hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/rest/TestMetricsRESTSourceImpl.java
 delete mode 100644 hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/test/MetricsAssertHelperImpl.java
 delete mode 100644 hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/thrift/TestMetricsThriftServerSourceFactoryImpl.java
 delete mode 100644 hbase-hadoop2-compat/src/test/resources/META-INF/services/org.apache.hadoop.hbase.HadoopShims
 delete mode 100644 hbase-hadoop2-compat/src/test/resources/META-INF/services/org.apache.hadoop.hbase.test.MetricsAssertHelper
 delete mode 100644 hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MetricsREST.java
 delete mode 100644 hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/ServerMetricsTmpl.jamon
 delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServer.java
 delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerWrapperImpl.java
 create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/JobUtil.java
 delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsAssignmentManager.java
 delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsMaster.java
 delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterFileSystem.java
 delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterWrapperImpl.java
 delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsSnapshot.java
 delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsBalancer.java
 delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsStochasticBalancer.java
 delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegion.java
 delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServer.java
 delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java
 delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapperImpl.java
 delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWAL.java
 delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWALEditsReplay.java
 delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSink.java
 delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSource.java
 delete mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/TestStochasticBalancerJmxMetrics.java
 delete mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerWrapperStub.java
 delete mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestRpcMetrics.java
 delete mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterMetrics.java
 delete mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterMetricsWrapper.java
 delete mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperStub.java
 delete mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapperStub.java
 delete mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegion.java
 delete mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServer.java
 delete mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java
 delete mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRemoveRegionMetrics.java
 delete mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestMetricsWAL.java
 delete mode 100644 hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftMetrics.java
 delete mode 100644 hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestCallQueue.java

diff --git a/conf/hadoop-metrics2-hbase.properties b/conf/hadoop-metrics2-hbase.properties
deleted file mode 100644
index 4c7dbbe..0000000
--- a/conf/hadoop-metrics2-hbase.properties
+++ /dev/null
@@ -1,44 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# syntax: [prefix].[source|sink].[instance].[options]
-# See javadoc of package-info.java for org.apache.hadoop.metrics2 for details
-
-*.sink.file*.class=org.apache.hadoop.metrics2.sink.FileSink
-# default sampling period
-*.period=10
-
-# Below are some examples of sinks that could be used
-# to monitor different hbase daemons.
-
-# hbase.sink.file-all.class=org.apache.hadoop.metrics2.sink.FileSink
-# hbase.sink.file-all.filename=all.metrics
-
-# hbase.sink.file0.class=org.apache.hadoop.metrics2.sink.FileSink
-# hbase.sink.file0.context=hmaster
-# hbase.sink.file0.filename=master.metrics
-
-# hbase.sink.file1.class=org.apache.hadoop.metrics2.sink.FileSink
-# hbase.sink.file1.context=thrift-one
-# hbase.sink.file1.filename=thrift-one.metrics
-
-# hbase.sink.file2.class=org.apache.hadoop.metrics2.sink.FileSink
-# hbase.sink.file2.context=thrift-two
-# hbase.sink.file2.filename=thrift-one.metrics
-
-# hbase.sink.file3.class=org.apache.hadoop.metrics2.sink.FileSink
-# hbase.sink.file3.context=rest
-# hbase.sink.file3.filename=rest.metrics
diff --git a/hbase-assembly/pom.xml b/hbase-assembly/pom.xml
index 4851391..0ce6714 100644
--- a/hbase-assembly/pom.xml
+++ b/hbase-assembly/pom.xml
@@ -162,15 +162,6 @@
       hbase-server
-      org.apache.hbase
-      hbase-hadoop-compat
-
-
-      org.apache.hbase
-      ${compat.module}
-      ${project.version}
-
-
       org.apache.hbase
       hbase-shell
       ${project.version}
diff --git a/hbase-assembly/src/main/assembly/hadoop-two-compat.xml b/hbase-assembly/src/main/assembly/hadoop-two-compat.xml
index 9ef624c..3cc7838 100644
--- a/hbase-assembly/src/main/assembly/hadoop-two-compat.xml
+++ b/hbase-assembly/src/main/assembly/hadoop-two-compat.xml
@@ -36,8 +36,6 @@
         org.apache.hbase:hbase-client
         org.apache.hbase:hbase-common
         org.apache.hbase:hbase-examples
-        org.apache.hbase:hbase-hadoop2-compat
-        org.apache.hbase:hbase-hadoop-compat
         org.apache.hbase:hbase-it
         org.apache.hbase:hbase-prefix-tree
         org.apache.hbase:hbase-procedure
diff --git a/hbase-assembly/src/main/assembly/src.xml b/hbase-assembly/src/main/assembly/src.xml
index cf73493..3d8281e 100644
--- a/hbase-assembly/src/main/assembly/src.xml
+++ b/hbase-assembly/src/main/assembly/src.xml
@@ -38,8 +38,6 @@
         org.apache.hbase:hbase-common
         org.apache.hbase:hbase-examples
         org.apache.hbase:hbase-external-blockcache
-        org.apache.hbase:hbase-hadoop2-compat
-        org.apache.hbase:hbase-hadoop-compat
         org.apache.hbase:hbase-it
         org.apache.hbase:hbase-prefix-tree
         org.apache.hbase:hbase-procedure
diff --git a/hbase-hadoop-compat/pom.xml b/hbase-hadoop-compat/pom.xml
deleted file mode 100644
index 016dd24..0000000
--- a/hbase-hadoop-compat/pom.xml
+++ /dev/null
@@ -1,142 +0,0 @@
-
-
-
-  4.0.0
-
-
-    hbase
-    org.apache.hbase
-    2.0.0-SNAPSHOT
-    ..
-
-
-  hbase-hadoop-compat
-  Apache HBase - Hadoop Compatibility
-
-    Interfaces to be implemented in order to smooth
-    over hadoop version differences
-
-
-
-
-
-        org.apache.maven.plugins
-        maven-site-plugin
-
-          true
-
-
-
-
-        maven-assembly-plugin
-        ${maven.assembly.version}
-
-          true
-
-
-
-        maven-surefire-plugin
-
-
-
-            secondPartTestsExecution
-            test
-
-              test
-
-
-              true
-
-
-
-
-
-
-        org.apache.maven.plugins
-        maven-source-plugin
-
-
-
-
-
-
-          org.eclipse.m2e
-          lifecycle-mapping
-          1.0.0
-
-
-
-
-
-                    org.apache.maven.plugins
-                    maven-compiler-plugin
-                    [3.2,)
-
-                      compile
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-      org.apache.hbase
-      hbase-annotations
-      test-jar
-      test
-
-
-
-      commons-logging
-      commons-logging
-
-
-      org.apache.commons
-      commons-math
-
-
-
-
-
-
-      skipHadoopCompatTests
-
-
-          skipHadoopCompatTests
-
-
-
-        true
-
-
-
-
diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/CompatibilityFactory.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/CompatibilityFactory.java
deleted file mode 100644
index 8305316..0000000
--- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/CompatibilityFactory.java
+++ /dev/null
@@ -1,75 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-
-import java.util.Iterator;
-import java.util.ServiceLoader;
-
-/**
- * Class that will create many instances of classes provided by the hbase-hadoop{1|2}-compat jars.
- */
-public class CompatibilityFactory {
-
-  private static final Log LOG = LogFactory.getLog(CompatibilitySingletonFactory.class);
-  public static final String EXCEPTION_START = "Could not create ";
-  public static final String EXCEPTION_END = " Is the hadoop compatibility jar on the classpath?";
-
-  /**
-   * This is a static only class don't let any instance be created.
-   */
-  protected CompatibilityFactory() {}
-
-  public static synchronized <T> T getInstance(Class<T> klass) {
-    T instance = null;
-    try {
-      ServiceLoader<T> loader = ServiceLoader.load(klass);
-      Iterator<T> it = loader.iterator();
-      instance = it.next();
-      if (it.hasNext()) {
-        StringBuilder msg = new StringBuilder();
-        msg.append("ServiceLoader provided more than one implementation for class: ")
-            .append(klass)
-            .append(", using implementation: ").append(instance.getClass())
-            .append(", other implementations: {");
-        while (it.hasNext()) {
-          msg.append(it.next()).append(" ");
-        }
-        msg.append("}");
-        LOG.warn(msg);
-      }
-    } catch (Exception e) {
-      throw new RuntimeException(createExceptionString(klass), e);
-    } catch (Error e) {
-      throw new RuntimeException(createExceptionString(klass), e);
-    }
-
-    // If there was nothing returned and no exception then throw an exception.
-    if (instance == null) {
-      throw new RuntimeException(createExceptionString(klass));
-    }
-    return instance;
-  }
-
-  protected static String createExceptionString(Class klass) {
-    return EXCEPTION_START + klass.toString() + EXCEPTION_END;
-  }
-}
diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/CompatibilitySingletonFactory.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/CompatibilitySingletonFactory.java
deleted file mode 100644
index 78442ba..0000000
--- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/CompatibilitySingletonFactory.java
+++ /dev/null
@@ -1,88 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.Map;
-import java.util.ServiceLoader;
-
-/**
- * Factory for classes supplied by hadoop compatibility modules. Only one of each class will be
- * created.
- */
-public class CompatibilitySingletonFactory extends CompatibilityFactory {
-  public static enum SingletonStorage {
-    INSTANCE;
-    private final Object lock = new Object();
-    private final Map<Class, Object> instances = new HashMap<Class, Object>();
-  }
-  private static final Log LOG = LogFactory.getLog(CompatibilitySingletonFactory.class);
-
-  /**
-   * This is a static only class don't let anyone create an instance.
-   */
-  protected CompatibilitySingletonFactory() { }
-
-  /**
-   * Get the singleton instance of Any classes defined by compatibiliy jar's
-   *
-   * @return the singleton
-   */
-  @SuppressWarnings("unchecked")
-  public static <T> T getInstance(Class<T> klass) {
-    synchronized (SingletonStorage.INSTANCE.lock) {
-      T instance = (T) SingletonStorage.INSTANCE.instances.get(klass);
-      if (instance == null) {
-        try {
-          ServiceLoader<T> loader = ServiceLoader.load(klass);
-          Iterator<T> it = loader.iterator();
-          instance = it.next();
-          if (it.hasNext()) {
-            StringBuilder msg = new StringBuilder();
-            msg.append("ServiceLoader provided more than one implementation for class: ")
-                .append(klass)
-                .append(", using implementation: ").append(instance.getClass())
-                .append(", other implementations: {");
-            while (it.hasNext()) {
-              msg.append(it.next()).append(" ");
-            }
-            msg.append("}");
-            LOG.warn(msg);
-          }
-        } catch (Exception e) {
-          throw new RuntimeException(createExceptionString(klass), e);
-        } catch (Error e) {
-          throw new RuntimeException(createExceptionString(klass), e);
-        }
-
-        // If there was nothing returned and no exception then throw an exception.
-        if (instance == null) {
-          throw new RuntimeException(createExceptionString(klass));
-        }
-        SingletonStorage.INSTANCE.instances.put(klass, instance);
-      }
-      return instance;
-    }
-
-  }
-}
diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSource.java
deleted file mode 100644
index 061a672..0000000
--- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSource.java
+++ /dev/null
@@ -1,119 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.ipc;
-
-import org.apache.hadoop.hbase.metrics.BaseSource;
-
-public interface MetricsHBaseServerSource extends BaseSource {
-  String AUTHORIZATION_SUCCESSES_NAME = "authorizationSuccesses";
-  String AUTHORIZATION_SUCCESSES_DESC =
-      "Number of authorization successes.";
-  String AUTHORIZATION_FAILURES_NAME = "authorizationFailures";
-  String AUTHORIZATION_FAILURES_DESC =
-      "Number of authorization failures.";
-  String AUTHENTICATION_SUCCESSES_NAME = "authenticationSuccesses";
-  String AUTHENTICATION_SUCCESSES_DESC =
-      "Number of authentication successes.";
-  String AUTHENTICATION_FAILURES_NAME = "authenticationFailures";
-  String AUTHENTICATION_FAILURES_DESC =
-      "Number of authentication failures.";
-  String AUTHENTICATION_FALLBACKS_NAME = "authenticationFallbacks";
-  String AUTHENTICATION_FALLBACKS_DESC =
-      "Number of fallbacks to insecure authentication.";
-  String SENT_BYTES_NAME = "sentBytes";
-  String SENT_BYTES_DESC = "Number of bytes sent.";
-  String RECEIVED_BYTES_NAME = "receivedBytes";
-  String RECEIVED_BYTES_DESC = "Number of bytes received.";
-  String REQUEST_SIZE_NAME = "requestSize";
-  String REQUEST_SIZE_DESC = "Request size in bytes.";
-  String RESPONSE_SIZE_NAME = "responseSize";
-  String RESPONSE_SIZE_DESC = "Response size in bytes.";
-  String QUEUE_CALL_TIME_NAME = "queueCallTime";
-  String QUEUE_CALL_TIME_DESC = "Queue Call Time.";
-  String PROCESS_CALL_TIME_NAME = "processCallTime";
-  String PROCESS_CALL_TIME_DESC = "Processing call time.";
-  String TOTAL_CALL_TIME_NAME = "totalCallTime";
-  String TOTAL_CALL_TIME_DESC = "Total call time, including both queued and processing time.";
-  String QUEUE_SIZE_NAME = "queueSize";
-  String QUEUE_SIZE_DESC = "Number of bytes in the call queues.";
-  String GENERAL_QUEUE_NAME = "numCallsInGeneralQueue";
-  String GENERAL_QUEUE_DESC = "Number of calls in the general call queue.";
-  String PRIORITY_QUEUE_NAME = "numCallsInPriorityQueue";
-  String REPLICATION_QUEUE_NAME = "numCallsInReplicationQueue";
-  String REPLICATION_QUEUE_DESC =
-      "Number of calls in the replication call queue.";
-  String PRIORITY_QUEUE_DESC = "Number of calls in the priority call queue.";
-  String NUM_OPEN_CONNECTIONS_NAME = "numOpenConnections";
-  String NUM_OPEN_CONNECTIONS_DESC = "Number of open connections.";
-  String NUM_ACTIVE_HANDLER_NAME = "numActiveHandler";
-  String NUM_ACTIVE_HANDLER_DESC = "Number of active rpc handlers.";
-
-  String EXCEPTIONS_NAME="exceptions";
-  String EXCEPTIONS_DESC="Exceptions caused by requests";
-  String EXCEPTIONS_TYPE_DESC="Number of requests that resulted in the specified type of Exception";
-  String EXCEPTIONS_OOO_NAME="exceptions.OutOfOrderScannerNextException";
-  String EXCEPTIONS_BUSY_NAME="exceptions.RegionTooBusyException";
-  String EXCEPTIONS_UNKNOWN_NAME="exceptions.UnknownScannerException";
-  String EXCEPTIONS_SANITY_NAME="exceptions.FailedSanityCheckException";
-  String EXCEPTIONS_MOVED_NAME="exceptions.RegionMovedException";
-  String EXCEPTIONS_NSRE_NAME="exceptions.NotServingRegionException";
-  String EXCEPTIONS_MULTI_TOO_LARGE_NAME = "exceptions.multiResponseTooLarge";
-  String EXCEPTIONS_MULTI_TOO_LARGE_DESC = "A response to a multi request was too large and the " +
-      "rest of the requests will have to be retried.";
-
-  void authorizationSuccess();
-
-  void authorizationFailure();
-
-  void authenticationSuccess();
-
-  void authenticationFailure();
-
-  void authenticationFallback();
-
-  void exception();
-
-  /**
-   * Different types of exceptions
-   */
-  void outOfOrderException();
-  void failedSanityException();
-  void movedRegionException();
-  void notServingRegionException();
-  void unknownScannerException();
-  void tooBusyException();
-  void multiActionTooLargeException();
-
-  void sentBytes(long count);
-
-  void receivedBytes(int count);
-
-  void sentResponse(long count);
-
-  void receivedRequest(long count);
-
-  void dequeuedCall(int qTime);
-
-  void processedCall(int processingTime);
-
-  void queuedAndProcessedCall(int totalTime);
-
-
-}
diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSourceFactory.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSourceFactory.java
deleted file mode 100644
index d6b1392..0000000
--- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSourceFactory.java
+++ /dev/null
@@ -1,57 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.ipc;
-
-public abstract class MetricsHBaseServerSourceFactory {
-  /**
-   * The name of the metrics
-   */
-  static final String METRICS_NAME = "IPC";
-
-  /**
-   * Description
-   */
-  static final String METRICS_DESCRIPTION = "Metrics about HBase Server IPC";
-
-  /**
-   * The Suffix of the JMX Context that a MetricsHBaseServerSource will register under.
-   *
-   * JMX_CONTEXT will be created by createContextName(serverClassName) + METRICS_JMX_CONTEXT_SUFFIX
-   */
-  static final String METRICS_JMX_CONTEXT_SUFFIX = ",sub=" + METRICS_NAME;
-
-  abstract MetricsHBaseServerSource create(String serverName, MetricsHBaseServerWrapper wrapper);
-
-  /**
-   * From the name of the class that's starting up create the
-   * context that an IPC source should register itself.
-   *
-   * @param serverName The name of the class that's starting up.
-   * @return The Camel Cased context name.
-   */
-  protected static String createContextName(String serverName) {
-    if (serverName.contains("HMaster") || serverName.contains("master")) {
-      return "Master";
-    } else if (serverName.contains("HRegion") || serverName.contains("regionserver")) {
-      return "RegionServer";
-    }
-    return "IPC";
-  }
-}
diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerWrapper.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerWrapper.java
deleted file mode 100644
index 1885264..0000000
--- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerWrapper.java
+++ /dev/null
@@ -1,29 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.ipc;
-
-public interface MetricsHBaseServerWrapper {
-  long getTotalQueueSize();
-  int getGeneralQueueLength();
-  int getReplicationQueueLength();
-  int getPriorityQueueLength();
-  int getNumOpenConnections();
-  int getActiveRpcHandlerCount();
-}
diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsAssignmentManagerSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsAssignmentManagerSource.java
deleted file mode 100644
index 92fd111..0000000
--- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsAssignmentManagerSource.java
+++ /dev/null
@@ -1,75 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.master;
-
-import org.apache.hadoop.hbase.metrics.BaseSource;
-
-public interface MetricsAssignmentManagerSource extends BaseSource {
-
-  /**
-   * The name of the metrics
-   */
-  String METRICS_NAME = "AssignmentManger";
-
-  /**
-   * The context metrics will be under.
-   */
-  String METRICS_CONTEXT = "master";
-
-  /**
-   * The name of the metrics context that metrics will be under in jmx
-   */
-  String METRICS_JMX_CONTEXT = "Master,sub=" + METRICS_NAME;
-
-  /**
-   * Description
-   */
-  String METRICS_DESCRIPTION = "Metrics about HBase master assingment manager.";
-
-  String RIT_COUNT_NAME = "ritCount";
-  String RIT_COUNT_OVER_THRESHOLD_NAME = "ritCountOverThreshold";
-  String RIT_OLDEST_AGE_NAME = "ritOldestAge";
-  String ASSIGN_TIME_NAME = "assign";
-  String BULK_ASSIGN_TIME_NAME = "bulkAssign";
-
-  void updateAssignmentTime(long time);
-
-  void updateBulkAssignTime(long time);
-
-  /**
-   * Set the number of regions in transition.
-   *
-   * @param ritCount count of the regions in transition.
-   */
-  void setRIT(int ritCount);
-
-  /**
-   * Set the count of the number of regions that have been in transition over the threshold time.
-   *
-   * @param ritCountOverThreshold number of regions in transition for longer than threshold.
-   */
-  void setRITCountOverThreshold(int ritCountOverThreshold);
-
-  /**
-   * Set the oldest region in transition.
-   *
-   * @param age age of the oldest RIT.
-   */
-  void setRITOldestAge(long age);
-}
diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterFileSystemSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterFileSystemSource.java
deleted file mode 100644
index 6cf942b..0000000
--- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterFileSystemSource.java
+++ /dev/null
@@ -1,64 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.master;
-
-import org.apache.hadoop.hbase.metrics.BaseSource;
-
-public interface MetricsMasterFileSystemSource extends BaseSource {
-
-  /**
-   * The name of the metrics
-   */
-  String METRICS_NAME = "FileSystem";
-
-  /**
-   * The context metrics will be under.
-   */
-  String METRICS_CONTEXT = "master";
-
-  /**
-   * The name of the metrics context that metrics will be under in jmx
-   */
-  String METRICS_JMX_CONTEXT = "Master,sub=" + METRICS_NAME;
-
-  /**
-   * Description
-   */
-  String METRICS_DESCRIPTION = "Metrics about HBase master file system.";
-
-  String META_SPLIT_TIME_NAME = "metaHlogSplitTime";
-  String META_SPLIT_SIZE_NAME = "metaHlogSplitSize";
-  String SPLIT_TIME_NAME = "hlogSplitTime";
-  String SPLIT_SIZE_NAME = "hlogSplitSize";
-
-  String META_SPLIT_TIME_DESC = "Time it takes to finish splitMetaLog()";
-  String META_SPLIT_SIZE_DESC = "Size of hbase:meta WAL files being split";
-  String SPLIT_TIME_DESC = "Time it takes to finish WAL.splitLog()";
-  String SPLIT_SIZE_DESC = "Size of WAL files being split";
-
-
-  void updateMetaWALSplitTime(long time);
-
-  void updateMetaWALSplitSize(long size);
-
-  void updateSplitTime(long time);
-
-  void updateSplitSize(long size);
-
-}
diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterProcSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterProcSource.java
deleted file mode 100644
index 51a17a8..0000000
--- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterProcSource.java
+++ /dev/null
@@ -1,53 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.master;
-
-import org.apache.hadoop.hbase.metrics.BaseSource;
-
-/**
- * Interface that classes that expose metrics about the master will implement.
- */
-public interface MetricsMasterProcSource extends BaseSource {
-
-  /**
-   * The name of the metrics
-   */
-  String METRICS_NAME = "Procedure";
-
-  /**
-   * The context metrics will be under.
-   */
-  String METRICS_CONTEXT = "master";
-
-  /**
-   * The name of the metrics context that metrics will be under in jmx
-   */
-  String METRICS_JMX_CONTEXT = "Master,sub=" + METRICS_NAME;
-
-  /**
-   * Description
-   */
-  String METRICS_DESCRIPTION = "Metrics about HBase master procedure";
-
-  // Strings used for exporting to metrics system.
-  String NUM_MASTER_WALS_NAME = "numMasterWALs";
-
-  String NUM_MASTER_WALS_DESC = "Number of master WAL files";
-
-}
diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterProcSourceFactory.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterProcSourceFactory.java
deleted file mode 100644
index b282e06..0000000
--- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterProcSourceFactory.java
+++ /dev/null
@@ -1,28 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.master;
-
-/**
- * Interface of a factory to create MetricsMasterSource when given a MetricsMasterWrapper
- */
-public interface MetricsMasterProcSourceFactory {
-
-  MetricsMasterProcSource create(MetricsMasterWrapper masterWrapper);
-
-}
diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSource.java
deleted file mode 100644
index ab621cc..0000000
--- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSource.java
+++ /dev/null
@@ -1,86 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.master;
-
-import org.apache.hadoop.hbase.metrics.BaseSource;
-
-/**
- * Interface that classes that expose metrics about the master will implement.
- */
-public interface MetricsMasterSource extends BaseSource {
-
-  /**
-   * The name of the metrics
-   */
-  String METRICS_NAME = "Server";
-
-  /**
-   * The context metrics will be under.
-   */
-  String METRICS_CONTEXT = "master";
-
-  /**
-   * The name of the metrics context that metrics will be under in jmx
-   */
-  String METRICS_JMX_CONTEXT = "Master,sub=" + METRICS_NAME;
-
-  /**
-   * Description
-   */
-  String METRICS_DESCRIPTION = "Metrics about HBase master server";
-
-  // Strings used for exporting to metrics system.
- String MASTER_ACTIVE_TIME_NAME = "masterActiveTime"; - String MASTER_START_TIME_NAME = "masterStartTime"; - String AVERAGE_LOAD_NAME = "averageLoad"; - String LIVE_REGION_SERVERS_NAME = "liveRegionServers"; - String DEAD_REGION_SERVERS_NAME = "deadRegionServers"; - String NUM_REGION_SERVERS_NAME = "numRegionServers"; - String NUM_DEAD_REGION_SERVERS_NAME = "numDeadRegionServers"; - String ZOOKEEPER_QUORUM_NAME = "zookeeperQuorum"; - String SERVER_NAME_NAME = "serverName"; - String CLUSTER_ID_NAME = "clusterId"; - String IS_ACTIVE_MASTER_NAME = "isActiveMaster"; - - String CLUSTER_REQUESTS_NAME = "clusterRequests"; - String MASTER_ACTIVE_TIME_DESC = "Master Active Time"; - String MASTER_START_TIME_DESC = "Master Start Time"; - String AVERAGE_LOAD_DESC = "AverageLoad"; - String LIVE_REGION_SERVERS_DESC = "Names of live RegionServers"; - String NUMBER_OF_REGION_SERVERS_DESC = "Number of RegionServers"; - String DEAD_REGION_SERVERS_DESC = "Names of dead RegionServers"; - String NUMBER_OF_DEAD_REGION_SERVERS_DESC = "Number of dead RegionServers"; - String ZOOKEEPER_QUORUM_DESC = "Zookeeper Quorum"; - String SERVER_NAME_DESC = "Server Name"; - String CLUSTER_ID_DESC = "Cluster Id"; - String IS_ACTIVE_MASTER_DESC = "Is Active Master"; - - - - /** - * Increment the number of requests the cluster has seen. - * - * @param inc Ammount to increment the total by. - */ - void incRequests(final long inc); - - - - -} diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceFactory.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceFactory.java deleted file mode 100644 index 63a85a3..0000000 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceFactory.java +++ /dev/null @@ -1,28 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.master; - -/** - * Interface of a factory to create MetricsMasterSource when given a MetricsMasterWrapper - */ -public interface MetricsMasterSourceFactory { - - MetricsMasterSource create(MetricsMasterWrapper masterWrapper); - -} diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterWrapper.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterWrapper.java deleted file mode 100644 index 678db69..0000000 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterWrapper.java +++ /dev/null @@ -1,115 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.master; - -/** - * This is the interface that will expose information to hadoop1/hadoop2 implementations of the - * MetricsMasterSource. - */ -public interface MetricsMasterWrapper { - - /** - * Get ServerName - */ - String getServerName(); - - /** - * Get Average Load - * - * @return Average Load - */ - double getAverageLoad(); - - /** - * Get the Cluster ID - * - * @return Cluster ID - */ - String getClusterId(); - - /** - * Get the Zookeeper Quorum Info - * - * @return Zookeeper Quorum Info - */ - String getZookeeperQuorum(); - - /** - * Get the co-processors - * - * @return Co-processors - */ - String[] getCoprocessors(); - - /** - * Get hbase master start time - * - * @return Start time of master in milliseconds - */ - long getStartTime(); - - /** - * Get the hbase master active time - * - * @return Time in milliseconds when master became active - */ - long getActiveTime(); - - /** - * Whether this master is the active master - * - * @return True if this is the active master - */ - boolean getIsActiveMaster(); - - /** - * Get the live region servers - * - * @return Live region servers - */ - String getRegionServers(); - - /** - * Get the number of live region servers - * - * @return number of Live region servers - */ - - int getNumRegionServers(); - - /** - * Get the dead region servers - * - * @return Dead region Servers - */ - String getDeadRegionServers(); - - /** - * Get the number of dead region servers - * - * @return number of Dead region Servers - */ - int getNumDeadRegionServers(); - - /** - * Get the number of master WAL files. - */ - long getNumWALFiles(); - -} diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsSnapshotSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsSnapshotSource.java deleted file mode 100644 index 371a316..0000000 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsSnapshotSource.java +++ /dev/null @@ -1,56 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.hbase.master; - -import org.apache.hadoop.hbase.metrics.BaseSource; - -public interface MetricsSnapshotSource extends BaseSource { - /** - * The name of the metrics - */ - String METRICS_NAME = "Snapshots"; - - /** - * The context metrics will be under. - */ - String METRICS_CONTEXT = "master"; - - /** - * The name of the metrics context that metrics will be under in jmx - */ - String METRICS_JMX_CONTEXT = "Master,sub=" + METRICS_NAME; - - /** - * Description - */ - String METRICS_DESCRIPTION = "Metrics about HBase master server"; - - String SNAPSHOT_TIME_NAME = "snapshotTime"; - String SNAPSHOT_RESTORE_TIME_NAME = "snapshotRestoreTime"; - String SNAPSHOT_CLONE_TIME_NAME = "snapshotCloneTime"; - String SNAPSHOT_TIME_DESC = "Time it takes to finish snapshot()"; - String SNAPSHOT_RESTORE_TIME_DESC = "Time it takes to finish restoreSnapshot()"; - String SNAPSHOT_CLONE_TIME_DESC = "Time it takes to finish cloneSnapshot()"; - - void updateSnapshotTime(long time); - - void updateSnapshotCloneTime(long time); - - void updateSnapshotRestoreTime(long time); -} diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsBalancerSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsBalancerSource.java deleted file mode 100644 index 1c9a61e..0000000 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsBalancerSource.java +++ /dev/null @@ -1,51 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.master.balancer; - -import org.apache.hadoop.hbase.metrics.BaseSource; - -public interface MetricsBalancerSource extends BaseSource { - - /** - * The name of the metrics - */ - String METRICS_NAME = "Balancer"; - - /** - * The context metrics will be under. 
- */ - String METRICS_CONTEXT = "master"; - - /** - * The name of the metrics context that metrics will be under in jmx - */ - String METRICS_JMX_CONTEXT = "Master,sub=" + METRICS_NAME; - - String BALANCE_CLUSTER = "balancerCluster"; - String MISC_INVOATION_COUNT = "miscInvocationCount"; - - /** - * Description - */ - String METRICS_DESCRIPTION = "Metrics about HBase master balancer"; - - void updateBalanceCluster(long time); - - void incrMiscInvocations(); -} diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsStochasticBalancerSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsStochasticBalancerSource.java deleted file mode 100644 index 1621932..0000000 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsStochasticBalancerSource.java +++ /dev/null @@ -1,39 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.master.balancer; - -/** - * This interface extends the basic metrics balancer source to add a function - * to report metrics that related to stochastic load balancer. The purpose is to - * offer an insight to the internal cost calculations that can be useful to tune - * the balancer. For details, refer to HBASE-13965 - */ -public interface MetricsStochasticBalancerSource extends MetricsBalancerSource { - - /** - * Updates the number of metrics reported to JMX - */ - public void updateMetricsSize(int size); - - /** - * Reports stochastic load balancer costs to JMX - */ - public void updateStochasticCost(String tableName, String costFunctionName, - String costFunctionDesc, Double value); -} diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseSource.java deleted file mode 100644 index 3ab783a..0000000 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseSource.java +++ /dev/null @@ -1,115 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.metrics; - -/** - * BaseSource for dynamic metrics to announce to Metrics2. - * In hbase-hadoop{1|2}-compat there is an implementation of this interface. - */ -public interface BaseSource { - - String HBASE_METRICS_SYSTEM_NAME = "HBase"; - - /** - * Clear out the metrics and re-prepare the source. - */ - void init(); - - /** - * Set a gauge to a specific value. - * - * @param gaugeName the name of the gauge - * @param value the value - */ - void setGauge(String gaugeName, long value); - - /** - * Add some amount to a gauge. - * - * @param gaugeName the name of the gauge - * @param delta the amount to change the gauge by. - */ - void incGauge(String gaugeName, long delta); - - /** - * Subtract some amount from a gauge. - * - * @param gaugeName the name of the gauge - * @param delta the amount to change the gauge by. - */ - void decGauge(String gaugeName, long delta); - - /** - * Remove a metric and no longer announce it. - * - * @param key Name of the gauge to remove. - */ - void removeMetric(String key); - - /** - * Add some amount to a counter. - * - * @param counterName the name of the counter - * @param delta the amount to change the counter by. - */ - void incCounters(String counterName, long delta); - - /** - * Add some value to a histogram. - * - * @param name the name of the histogram - * @param value the value to add to the histogram - */ - void updateHistogram(String name, long value); - - - /** - * Add some value to a Quantile (An accurate histogram). - * - * @param name the name of the quantile - * @param value the value to add to the quantile - */ - void updateQuantile(String name, long value); - - /** - * Get the metrics context. For hadoop metrics2 system this is usually an all lowercased string. - * eg. regionserver, master, thriftserver - * - * @return The string context used to register this source to hadoop's metrics2 system. - */ - String getMetricsContext(); - - /** - * Get the description of what this source exposes. - */ - String getMetricsDescription(); - - /** - * Get the name of the context in JMX that this source will be exposed through. - * This is in ObjectName format. With the default context being Hadoop -> HBase - */ - String getMetricsJmxContext(); - - /** - * Get the name of the metrics that are being exported by this source. - * Eg. IPC, GC, WAL - */ - String getMetricsName(); - -} diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/MBeanSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/MBeanSource.java deleted file mode 100644 index e4ff880..0000000 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/MBeanSource.java +++ /dev/null @@ -1,38 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.metrics; - -import javax.management.ObjectName; - -/** - * Object that will register an mbean with the underlying metrics implementation. - */ -public interface MBeanSource { - - /** - * Register an mbean with the underlying metrics system - * @param serviceName Metrics service/system name - * @param metricsName name of the metrics object to expose - * @param theMbean the actual MBean - * @return ObjectName from jmx - */ - ObjectName register(String serviceName, String metricsName, - Object theMbean); - -} diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionAggregateSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionAggregateSource.java deleted file mode 100644 index 578ce49..0000000 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionAggregateSource.java +++ /dev/null @@ -1,65 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.regionserver; - -import org.apache.hadoop.hbase.metrics.BaseSource; - -/** - * This interface will be implemented by a MetricsSource that will export metrics from - * multiple regions into the hadoop metrics system. - */ -public interface MetricsRegionAggregateSource extends BaseSource { - - /** - * The name of the metrics - */ - String METRICS_NAME = "Regions"; - - /** - * The name of the metrics context that metrics will be under. - */ - String METRICS_CONTEXT = "regionserver"; - - /** - * Description - */ - String METRICS_DESCRIPTION = "Metrics about HBase RegionServer regions and tables"; - - /** - * The name of the metrics context that metrics will be under in jmx - */ - String METRICS_JMX_CONTEXT = "RegionServer,sub=" + METRICS_NAME; - - String NUM_REGIONS = "numRegions"; - String NUMBER_OF_REGIONS_DESC = "Number of regions in the metrics system"; - - /** - * Register a MetricsRegionSource as being open. - * - * @param source the source for the region being opened. - */ - void register(MetricsRegionSource source); - - /** - * Remove a region's source. This is called when a region is closed. - * - * @param source The region to remove. 
- */ - void deregister(MetricsRegionSource source); -} diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java deleted file mode 100644 index ee0217a..0000000 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java +++ /dev/null @@ -1,330 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.regionserver; - -import org.apache.hadoop.hbase.metrics.BaseSource; - -/** - * Interface for classes that expose metrics about the regionserver. - */ -public interface MetricsRegionServerSource extends BaseSource { - - /** - * The name of the metrics - */ - String METRICS_NAME = "Server"; - - /** - * The name of the metrics context that metrics will be under. - */ - String METRICS_CONTEXT = "regionserver"; - - /** - * Description - */ - String METRICS_DESCRIPTION = "Metrics about HBase RegionServer"; - - /** - * The name of the metrics context that metrics will be under in jmx - */ - String METRICS_JMX_CONTEXT = "RegionServer,sub=" + METRICS_NAME; - - /** - * Update the Put time histogram - * - * @param t time it took - */ - void updatePut(long t); - - /** - * Update the Delete time histogram - * - * @param t time it took - */ - void updateDelete(long t); - - /** - * Update the Get time histogram . - * - * @param t time it took - */ - void updateGet(long t); - - /** - * Update the Increment time histogram. - * - * @param t time it took - */ - void updateIncrement(long t); - - /** - * Update the Append time histogram. - * - * @param t time it took - */ - void updateAppend(long t); - - /** - * Update the Replay time histogram. - * - * @param t time it took - */ - void updateReplay(long t); - - /** - * Update the scan size. - * - * @param scanSize size of the scan - */ - void updateScannerNext(long scanSize); - - /** - * Increment the number of slow Puts that have happened. - */ - void incrSlowPut(); - - /** - * Increment the number of slow Deletes that have happened. - */ - void incrSlowDelete(); - - /** - * Increment the number of slow Gets that have happened. - */ - void incrSlowGet(); - - /** - * Increment the number of slow Increments that have happened. - */ - void incrSlowIncrement(); - - /** - * Increment the number of slow Appends that have happened. 
- */ - void incrSlowAppend(); - - /** - * Update the split transaction time histogram - * @param t time it took, in milliseconds - */ - void updateSplitTime(long t); - - /** - * Increment number of a requested splits - */ - void incrSplitRequest(); - - /** - * Increment number of successful splits - */ - void incrSplitSuccess(); - - /** - * Update the flush time histogram - * @param t time it took, in milliseconds - */ - void updateFlushTime(long t); - - // Strings used for exporting to metrics system. - String REGION_COUNT = "regionCount"; - String REGION_COUNT_DESC = "Number of regions"; - String STORE_COUNT = "storeCount"; - String STORE_COUNT_DESC = "Number of Stores"; - String WALFILE_COUNT = "hlogFileCount"; - String WALFILE_COUNT_DESC = "Number of WAL Files"; - String WALFILE_SIZE = "hlogFileSize"; - String WALFILE_SIZE_DESC = "Size of all WAL Files"; - String STOREFILE_COUNT = "storeFileCount"; - String STOREFILE_COUNT_DESC = "Number of Store Files"; - String MEMSTORE_SIZE = "memStoreSize"; - String MEMSTORE_SIZE_DESC = "Size of the memstore"; - String STOREFILE_SIZE = "storeFileSize"; - String STOREFILE_SIZE_DESC = "Size of storefiles being served."; - String TOTAL_REQUEST_COUNT = "totalRequestCount"; - String TOTAL_REQUEST_COUNT_DESC = - "Total number of requests this RegionServer has answered."; - String READ_REQUEST_COUNT = "readRequestCount"; - String READ_REQUEST_COUNT_DESC = - "Number of read requests this region server has answered."; - String WRITE_REQUEST_COUNT = "writeRequestCount"; - String WRITE_REQUEST_COUNT_DESC = - "Number of mutation requests this region server has answered."; - String CHECK_MUTATE_FAILED_COUNT = "checkMutateFailedCount"; - String CHECK_MUTATE_FAILED_COUNT_DESC = - "Number of Check and Mutate calls that failed the checks."; - String CHECK_MUTATE_PASSED_COUNT = "checkMutatePassedCount"; - String CHECK_MUTATE_PASSED_COUNT_DESC = - "Number of Check and Mutate calls that passed the checks."; - String STOREFILE_INDEX_SIZE = "storeFileIndexSize"; - String STOREFILE_INDEX_SIZE_DESC = "Size of indexes in storefiles on disk."; - String STATIC_INDEX_SIZE = "staticIndexSize"; - String STATIC_INDEX_SIZE_DESC = "Uncompressed size of the static indexes."; - String STATIC_BLOOM_SIZE = "staticBloomSize"; - String STATIC_BLOOM_SIZE_DESC = - "Uncompressed size of the static bloom filters."; - String NUMBER_OF_MUTATIONS_WITHOUT_WAL = "mutationsWithoutWALCount"; - String NUMBER_OF_MUTATIONS_WITHOUT_WAL_DESC = - "Number of mutations that have been sent by clients with the write ahead logging turned off."; - String DATA_SIZE_WITHOUT_WAL = "mutationsWithoutWALSize"; - String DATA_SIZE_WITHOUT_WAL_DESC = - "Size of data that has been sent by clients with the write ahead logging turned off."; - String PERCENT_FILES_LOCAL = "percentFilesLocal"; - String PERCENT_FILES_LOCAL_DESC = - "The percent of HFiles that are stored on the local hdfs data node."; - String PERCENT_FILES_LOCAL_SECONDARY_REGIONS = "percentFilesLocalSecondaryRegions"; - String PERCENT_FILES_LOCAL_SECONDARY_REGIONS_DESC = - "The percent of HFiles used by secondary regions that are stored on the local hdfs data node."; - String SPLIT_QUEUE_LENGTH = "splitQueueLength"; - String SPLIT_QUEUE_LENGTH_DESC = "Length of the queue for splits."; - String COMPACTION_QUEUE_LENGTH = "compactionQueueLength"; - String LARGE_COMPACTION_QUEUE_LENGTH = "largeCompactionQueueLength"; - String SMALL_COMPACTION_QUEUE_LENGTH = "smallCompactionQueueLength"; - String COMPACTION_QUEUE_LENGTH_DESC = "Length of the queue for 
compactions."; - String FLUSH_QUEUE_LENGTH = "flushQueueLength"; - String FLUSH_QUEUE_LENGTH_DESC = "Length of the queue for region flushes"; - String BLOCK_CACHE_FREE_SIZE = "blockCacheFreeSize"; - String BLOCK_CACHE_FREE_DESC = - "Size of the block cache that is not occupied."; - String BLOCK_CACHE_COUNT = "blockCacheCount"; - String BLOCK_CACHE_COUNT_DESC = "Number of block in the block cache."; - String BLOCK_CACHE_SIZE = "blockCacheSize"; - String BLOCK_CACHE_SIZE_DESC = "Size of the block cache."; - String BLOCK_CACHE_HIT_COUNT = "blockCacheHitCount"; - String BLOCK_CACHE_HIT_COUNT_DESC = "Count of the hit on the block cache."; - String BLOCK_CACHE_PRIMARY_HIT_COUNT = "blockCacheHitCountPrimary"; - String BLOCK_CACHE_PRIMARY_HIT_COUNT_DESC = "Count of hit on primary replica in the block cache."; - String BLOCK_CACHE_MISS_COUNT = "blockCacheMissCount"; - String BLOCK_COUNT_MISS_COUNT_DESC = - "Number of requests for a block that missed the block cache."; - String BLOCK_CACHE_PRIMARY_MISS_COUNT = "blockCacheMissCountPrimary"; - String BLOCK_COUNT_PRIMARY_MISS_COUNT_DESC = - "Number of requests for a block of primary replica that missed the block cache."; - String BLOCK_CACHE_EVICTION_COUNT = "blockCacheEvictionCount"; - String BLOCK_CACHE_EVICTION_COUNT_DESC = - "Count of the number of blocks evicted from the block cache."; - String BLOCK_CACHE_PRIMARY_EVICTION_COUNT = "blockCacheEvictionCountPrimary"; - String BLOCK_CACHE_PRIMARY_EVICTION_COUNT_DESC = - "Count of the number of blocks evicted from primary replica in the block cache."; - String BLOCK_CACHE_HIT_PERCENT = "blockCacheCountHitPercent"; - String BLOCK_CACHE_HIT_PERCENT_DESC = - "Percent of block cache requests that are hits"; - String BLOCK_CACHE_EXPRESS_HIT_PERCENT = "blockCacheExpressHitPercent"; - String BLOCK_CACHE_EXPRESS_HIT_PERCENT_DESC = - "The percent of the time that requests with the cache turned on hit the cache."; - String BLOCK_CACHE_FAILED_INSERTION_COUNT = "blockCacheFailedInsertionCount"; - String BLOCK_CACHE_FAILED_INSERTION_COUNT_DESC = "Number of times that a block cache " + - "insertion failed. 
Usually due to size restrictions."; - String RS_START_TIME_NAME = "regionServerStartTime"; - String ZOOKEEPER_QUORUM_NAME = "zookeeperQuorum"; - String SERVER_NAME_NAME = "serverName"; - String CLUSTER_ID_NAME = "clusterId"; - String RS_START_TIME_DESC = "RegionServer Start Time"; - String ZOOKEEPER_QUORUM_DESC = "Zookeeper Quorum"; - String SERVER_NAME_DESC = "Server Name"; - String CLUSTER_ID_DESC = "Cluster Id"; - String UPDATES_BLOCKED_TIME = "updatesBlockedTime"; - String UPDATES_BLOCKED_DESC = - "Number of MS updates have been blocked so that the memstore can be flushed."; - String DELETE_KEY = "delete"; - String GET_KEY = "get"; - String INCREMENT_KEY = "increment"; - String MUTATE_KEY = "mutate"; - String APPEND_KEY = "append"; - String REPLAY_KEY = "replay"; - String SCAN_NEXT_KEY = "scanNext"; - String SLOW_MUTATE_KEY = "slowPutCount"; - String SLOW_GET_KEY = "slowGetCount"; - String SLOW_DELETE_KEY = "slowDeleteCount"; - String SLOW_INCREMENT_KEY = "slowIncrementCount"; - String SLOW_APPEND_KEY = "slowAppendCount"; - String SLOW_MUTATE_DESC = - "The number of Multis that took over 1000ms to complete"; - String SLOW_DELETE_DESC = - "The number of Deletes that took over 1000ms to complete"; - String SLOW_GET_DESC = "The number of Gets that took over 1000ms to complete"; - String SLOW_INCREMENT_DESC = - "The number of Increments that took over 1000ms to complete"; - String SLOW_APPEND_DESC = - "The number of Appends that took over 1000ms to complete"; - - String FLUSHED_CELLS = "flushedCellsCount"; - String FLUSHED_CELLS_DESC = "The number of cells flushed to disk"; - String FLUSHED_CELLS_SIZE = "flushedCellsSize"; - String FLUSHED_CELLS_SIZE_DESC = "The total amount of data flushed to disk, in bytes"; - String COMPACTED_CELLS = "compactedCellsCount"; - String COMPACTED_CELLS_DESC = "The number of cells processed during minor compactions"; - String COMPACTED_CELLS_SIZE = "compactedCellsSize"; - String COMPACTED_CELLS_SIZE_DESC = - "The total amount of data processed during minor compactions, in bytes"; - String MAJOR_COMPACTED_CELLS = "majorCompactedCellsCount"; - String MAJOR_COMPACTED_CELLS_DESC = - "The number of cells processed during major compactions"; - String MAJOR_COMPACTED_CELLS_SIZE = "majorCompactedCellsSize"; - String MAJOR_COMPACTED_CELLS_SIZE_DESC = - "The total amount of data processed during major compactions, in bytes"; - String CELLS_COUNT_COMPACTED_TO_MOB = "cellsCountCompactedToMob"; - String CELLS_COUNT_COMPACTED_TO_MOB_DESC = - "The number of cells moved to mob during compaction"; - String CELLS_COUNT_COMPACTED_FROM_MOB = "cellsCountCompactedFromMob"; - String CELLS_COUNT_COMPACTED_FROM_MOB_DESC = - "The number of cells moved from mob during compaction"; - String CELLS_SIZE_COMPACTED_TO_MOB = "cellsSizeCompactedToMob"; - String CELLS_SIZE_COMPACTED_TO_MOB_DESC = - "The total amount of cells move to mob during compaction, in bytes"; - String CELLS_SIZE_COMPACTED_FROM_MOB = "cellsSizeCompactedFromMob"; - String CELLS_SIZE_COMPACTED_FROM_MOB_DESC = - "The total amount of cells move from mob during compaction, in bytes"; - String MOB_FLUSH_COUNT = "mobFlushCount"; - String MOB_FLUSH_COUNT_DESC = "The number of the flushes in mob-enabled stores"; - String MOB_FLUSHED_CELLS_COUNT = "mobFlushedCellsCount"; - String MOB_FLUSHED_CELLS_COUNT_DESC = "The number of mob cells flushed to disk"; - String MOB_FLUSHED_CELLS_SIZE = "mobFlushedCellsSize"; - String MOB_FLUSHED_CELLS_SIZE_DESC = "The total amount of mob cells flushed to disk, in bytes"; - String 
MOB_SCAN_CELLS_COUNT = "mobScanCellsCount"; - String MOB_SCAN_CELLS_COUNT_DESC = "The number of scanned mob cells"; - String MOB_SCAN_CELLS_SIZE = "mobScanCellsSize"; - String MOB_SCAN_CELLS_SIZE_DESC = "The total amount of scanned mob cells, in bytes"; - String MOB_FILE_CACHE_ACCESS_COUNT = "mobFileCacheAccessCount"; - String MOB_FILE_CACHE_ACCESS_COUNT_DESC = "The count of accesses to the mob file cache"; - String MOB_FILE_CACHE_MISS_COUNT = "mobFileCacheMissCount"; - String MOB_FILE_CACHE_MISS_COUNT_DESC = "The count of misses to the mob file cache"; - String MOB_FILE_CACHE_HIT_PERCENT = "mobFileCacheHitPercent"; - String MOB_FILE_CACHE_HIT_PERCENT_DESC = "The hit percent to the mob file cache"; - String MOB_FILE_CACHE_EVICTED_COUNT = "mobFileCacheEvictedCount"; - String MOB_FILE_CACHE_EVICTED_COUNT_DESC = "The number of items evicted from the mob file cache"; - String MOB_FILE_CACHE_COUNT = "mobFileCacheCount"; - String MOB_FILE_CACHE_COUNT_DESC = "The count of cached mob files"; - - String HEDGED_READS = "hedgedReads"; - String HEDGED_READS_DESC = "The number of times we started a hedged read"; - String HEDGED_READ_WINS = "hedgedReadWins"; - String HEDGED_READ_WINS_DESC = - "The number of times we started a hedged read and a hedged read won"; - - String BLOCKED_REQUESTS_COUNT = "blockedRequestCount"; - String BLOCKED_REQUESTS_COUNT_DESC = "The number of blocked requests because of memstore size is " - + "larger than blockingMemStoreSize"; - - String SPLIT_KEY = "splitTime"; - String SPLIT_REQUEST_KEY = "splitRequestCount"; - String SPLIT_REQUEST_DESC = "Number of splits requested"; - String SPLIT_SUCCESS_KEY = "splitSuccessCount"; - String SPLIT_SUCCESS_DESC = "Number of successfully executed splits"; - String FLUSH_KEY = "flushTime"; -} diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceFactory.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceFactory.java deleted file mode 100644 index bb44946..0000000 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceFactory.java +++ /dev/null @@ -1,41 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.regionserver; - -/** - * Interface of a factory to create Metrics Sources used inside of regionservers. - */ -public interface MetricsRegionServerSourceFactory { - - /** - * Given a wrapper create a MetricsRegionServerSource. - * - * @param regionServerWrapper The wrapped region server - * @return a Metrics Source. - */ - MetricsRegionServerSource createServer(MetricsRegionServerWrapper regionServerWrapper); - - /** - * Create a MetricsRegionSource from a MetricsRegionWrapper. 
- * - * @param wrapper - * @return A metrics region source - */ - MetricsRegionSource createRegion(MetricsRegionWrapper wrapper); -} diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java deleted file mode 100644 index 02dec8d..0000000 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java +++ /dev/null @@ -1,370 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.regionserver; - -/** - * This is the interface that will expose RegionServer information to hadoop1/hadoop2 - * implementations of the MetricsRegionServerSource. - */ -public interface MetricsRegionServerWrapper { - - /** - * Get ServerName - */ - String getServerName(); - - /** - * Get the Cluster ID - * - * @return Cluster ID - */ - String getClusterId(); - - /** - * Get the Zookeeper Quorum Info - * - * @return Zookeeper Quorum Info - */ - String getZookeeperQuorum(); - - /** - * Get the co-processors - * - * @return Co-processors - */ - String getCoprocessors(); - - /** - * Get HRegionServer start time - * - * @return Start time of RegionServer in milliseconds - */ - long getStartCode(); - - /** - * The number of online regions - */ - long getNumOnlineRegions(); - - /** - * Get the number of stores hosted on this region server. - */ - long getNumStores(); - - /** - * Get the number of WAL files of this region server. - */ - long getNumWALFiles(); - - /** - * Get the size of WAL files of this region server. - */ - long getWALFileSize(); - - /** - * Get the number of WAL files with slow appends for this region server. - */ - long getNumWALSlowAppend(); - - /** - * Get the number of store files hosted on this region server. - */ - long getNumStoreFiles(); - - /** - * Get the size of the memstore on this region server. - */ - long getMemstoreSize(); - - /** - * Get the total size of the store files this region server is serving from. - */ - long getStoreFileSize(); - - /** - * Get the number of requests per second. - */ - double getRequestsPerSecond(); - - /** - * Get the total number of requests per second. - */ - long getTotalRequestCount(); - - /** - * Get the number of read requests to regions hosted on this region server. - */ - long getReadRequestsCount(); - - /** - * Get the number of write requests to regions hosted on this region server. - */ - long getWriteRequestsCount(); - - /** - * Get the number of CAS operations that failed. - */ - long getCheckAndMutateChecksFailed(); - - /** - * Get the number of CAS operations that passed. 
- */ - long getCheckAndMutateChecksPassed(); - - /** - * Get the Size (in bytes) of indexes in storefiles on disk. - */ - long getStoreFileIndexSize(); - - /** - * Get the size (in bytes) of of the static indexes including the roots. - */ - long getTotalStaticIndexSize(); - - /** - * Get the size (in bytes) of the static bloom filters. - */ - long getTotalStaticBloomSize(); - - /** - * Number of mutations received with WAL explicitly turned off. - */ - long getNumMutationsWithoutWAL(); - - /** - * Ammount of data in the memstore but not in the WAL because mutations explicitly had their - * WAL turned off. - */ - long getDataInMemoryWithoutWAL(); - - /** - * Get the percent of HFiles' that are local. - */ - double getPercentFileLocal(); - - /** - * Get the percent of HFiles' that are local for secondary region replicas. - */ - double getPercentFileLocalSecondaryRegions(); - - /** - * Get the size of the split queue - */ - int getSplitQueueSize(); - - /** - * Get the size of the compaction queue - */ - int getCompactionQueueSize(); - - int getSmallCompactionQueueSize(); - - int getLargeCompactionQueueSize(); - - /** - * Get the size of the flush queue. - */ - int getFlushQueueSize(); - - /** - * Get the size (in bytes) of the block cache that is free. - */ - long getBlockCacheFreeSize(); - - /** - * Get the number of items in the block cache. - */ - long getBlockCacheCount(); - - /** - * Get the total size (in bytes) of the block cache. - */ - long getBlockCacheSize(); - - /** - * Get the count of hits to the block cache - */ - long getBlockCacheHitCount(); - - /** - * Get the count of hits to primary replica in the block cache - */ - long getBlockCachePrimaryHitCount(); - - /** - * Get the count of misses to the block cache. - */ - long getBlockCacheMissCount(); - - /** - * Get the count of misses to primary replica in the block cache. - */ - long getBlockCachePrimaryMissCount(); - - /** - * Get the number of items evicted from the block cache. - */ - long getBlockCacheEvictedCount(); - - /** - * Get the number of items evicted from primary replica in the block cache. - */ - long getBlockCachePrimaryEvictedCount(); - - - /** - * Get the percent of all requests that hit the block cache. - */ - double getBlockCacheHitPercent(); - - /** - * Get the percent of requests with the block cache turned on that hit the block cache. - */ - double getBlockCacheHitCachingPercent(); - - /** - * Number of cache insertions that failed. - */ - long getBlockCacheFailedInsertions(); - - /** - * Force a re-computation of the metrics. - */ - void forceRecompute(); - - /** - * Get the amount of time that updates were blocked. - */ - long getUpdatesBlockedTime(); - - /** - * Get the number of cells flushed to disk. - */ - long getFlushedCellsCount(); - - /** - * Get the number of cells processed during minor compactions. - */ - long getCompactedCellsCount(); - - /** - * Get the number of cells processed during major compactions. - */ - long getMajorCompactedCellsCount(); - - /** - * Get the total amount of data flushed to disk, in bytes. - */ - long getFlushedCellsSize(); - - /** - * Get the total amount of data processed during minor compactions, in bytes. - */ - long getCompactedCellsSize(); - - /** - * Get the total amount of data processed during major compactions, in bytes. - */ - long getMajorCompactedCellsSize(); - - /** - * Gets the number of cells moved to mob during compaction. - */ - long getCellsCountCompactedToMob(); - - /** - * Gets the number of cells moved from mob during compaction. 
- */ - long getCellsCountCompactedFromMob(); - - /** - * Gets the total amount of cells moved to mob during compaction, in bytes. - */ - long getCellsSizeCompactedToMob(); - - /** - * Gets the total amount of cells moved from mob during compaction, in bytes. - */ - long getCellsSizeCompactedFromMob(); - - /** - * Gets the number of the flushes in mob-enabled stores. - */ - long getMobFlushCount(); - - /** - * Gets the number of mob cells flushed to disk. - */ - long getMobFlushedCellsCount(); - - /** - * Gets the total amount of mob cells flushed to disk, in bytes. - */ - long getMobFlushedCellsSize(); - - /** - * Gets the number of scanned mob cells. - */ - long getMobScanCellsCount(); - - /** - * Gets the total amount of scanned mob cells, in bytes. - */ - long getMobScanCellsSize(); - - /** - * Gets the count of accesses to the mob file cache. - */ - long getMobFileCacheAccessCount(); - - /** - * Gets the count of misses to the mob file cache. - */ - long getMobFileCacheMissCount(); - - /** - * Gets the number of items evicted from the mob file cache. - */ - long getMobFileCacheEvictedCount(); - - /** - * Gets the count of cached mob files. - */ - long getMobFileCacheCount(); - - /** - * Gets the hit percent to the mob file cache. - */ - double getMobFileCacheHitPercent(); - - /** - * @return Count of hedged read operations - */ - long getHedgedReadOps(); - - /** - * @return Count of times a hedged read beat out the primary read. - */ - long getHedgedReadWins(); - - /** - * @return Count of requests blocked because the memstore size is larger than blockingMemStoreSize - */ - long getBlockedRequestsCount(); -} diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSource.java deleted file mode 100644 index 874be31..0000000 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSource.java +++ /dev/null @@ -1,85 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.regionserver; - - -/** - * This interface will be implemented to allow single regions to push metrics into - * MetricsRegionAggregateSource that will in turn push data to the Hadoop metrics system. 
- */ -public interface MetricsRegionSource extends Comparable { - - String OPS_SAMPLE_NAME = "ops"; - String SIZE_VALUE_NAME = "size"; - String COMPACTIONS_COMPLETED_COUNT = "compactionsCompletedCount"; - String NUM_BYTES_COMPACTED_COUNT = "numBytesCompactedCount"; - String NUM_FILES_COMPACTED_COUNT = "numFilesCompactedCount"; - String COMPACTIONS_COMPLETED_DESC = "Number of compactions that have completed."; - String NUM_BYTES_COMPACTED_DESC = - "Sum of filesize on all files entering a finished, successful or aborted, compaction"; - String NUM_FILES_COMPACTED_DESC = - "Number of files that were input for finished, successful or aborted, compactions"; - String COPROCESSOR_EXECUTION_STATISTICS = "coprocessorExecutionStatistics"; - String COPROCESSOR_EXECUTION_STATISTICS_DESC = "Statistics for coprocessor execution times"; - String REPLICA_ID = "replicaid"; - String REPLICA_ID_DESC = "The replica ID of a region. 0 is primary, otherwise is secondary"; - - /** - * Close the region's metrics as this region is closing. - */ - void close(); - - /** - * Update related counts of puts. - */ - void updatePut(); - - /** - * Update related counts of deletes. - */ - void updateDelete(); - - /** - * Update count and sizes of gets. - * @param getSize size in bytes of the resulting key values for a get - */ - void updateGet(long getSize); - - /** - * Update the count and sizes of resultScanner.next() - * @param scanSize Size in bytes of the resulting key values for a next() - */ - void updateScan(long scanSize); - /** - * Update related counts of increments. - */ - void updateIncrement(); - - /** - * Update related counts of appends. - */ - void updateAppend(); - - /** - * Get the aggregate source to which this reports. - */ - MetricsRegionAggregateSource getAggregateSource(); - - -} diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.java deleted file mode 100644 index 0997f7c..0000000 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.java +++ /dev/null @@ -1,89 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.regionserver; - -/** - * Interface of class that will wrap an HRegion and export numbers so they can be - * used in MetricsRegionSource - */ -public interface MetricsRegionWrapper { - - /** - * Get the name of the table the region belongs to. - * - * @return The string version of the table name. - */ - String getTableName(); - - /** - * Get the name of the namespace this table is in. - * @return String version of the namespace. Can't be empty. 
- */ - String getNamespace(); - - /** - * Get the name of the region. - * - * @return The encoded name of the region. - */ - String getRegionName(); - - /** - * Get the number of stores hosted on this region server. - */ - long getNumStores(); - - /** - * Get the number of store files hosted on this region server. - */ - long getNumStoreFiles(); - - /** - * Get the size of the memstore on this region server. - */ - long getMemstoreSize(); - - /** - * Get the total size of the store files this region server is serving from. - */ - long getStoreFileSize(); - - /** - * Get the total number of read requests that have been issued against this region - */ - long getReadRequestCount(); - - /** - * Get the total number of mutations that have been issued against this region. - */ - long getWriteRequestCount(); - - long getNumFilesCompacted(); - - long getNumBytesCompacted(); - - long getNumCompactionsCompleted(); - - int getRegionHashCode(); - - /** - * Get the replica id of this region. - */ - int getReplicaId(); -} diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsEditsReplaySource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsEditsReplaySource.java deleted file mode 100644 index 4f8cb36..0000000 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsEditsReplaySource.java +++ /dev/null @@ -1,72 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.regionserver.wal; - -import org.apache.hadoop.hbase.metrics.BaseSource; - -/** - * Interface of the source that will export metrics about log replay statistics when recovering a - * region server in distributedLogReplay mode - */ -public interface MetricsEditsReplaySource extends BaseSource { - - /** - * The name of the metrics - */ - String METRICS_NAME = "replay"; - - /** - * The name of the metrics context that metrics will be under. 
- */ - String METRICS_CONTEXT = "regionserver"; - - /** - * Description - */ - String METRICS_DESCRIPTION = "Metrics about HBase RegionServer WAL Edits Replay"; - - /** - * The name of the metrics context that metrics will be under in jmx - */ - String METRICS_JMX_CONTEXT = "RegionServer,sub=" + METRICS_NAME; - - - String REPLAY_TIME_NAME = "replayTime"; - String REPLAY_TIME_DESC = "Time an replay operation took."; - String REPLAY_BATCH_SIZE_NAME = "replayBatchSize"; - String REPLAY_BATCH_SIZE_DESC = "Number of changes in each replay batch."; - String REPLAY_DATA_SIZE_NAME = "replayDataSize"; - String REPLAY_DATA_SIZE_DESC = "Size (in bytes) of the data of each replay."; - - /** - * Add the time a replay command took - */ - void updateReplayTime(long time); - - /** - * Add the batch size of each replay - */ - void updateReplayBatchSize(long size); - - /** - * Add the payload data size of each replay - */ - void updateReplayDataSize(long size); - -} diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWALSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWALSource.java deleted file mode 100644 index c6dc731..0000000 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWALSource.java +++ /dev/null @@ -1,96 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.regionserver.wal; - -import org.apache.hadoop.hbase.metrics.BaseSource; - -/** - * Interface of the source that will export metrics about the region server's WAL. - */ -public interface MetricsWALSource extends BaseSource { - - - /** - * The name of the metrics - */ - String METRICS_NAME = "WAL"; - - /** - * The name of the metrics context that metrics will be under. 
- */ - String METRICS_CONTEXT = "regionserver"; - - /** - * Description - */ - String METRICS_DESCRIPTION = "Metrics about HBase RegionServer WAL"; - - /** - * The name of the metrics context that metrics will be under in jmx - */ - String METRICS_JMX_CONTEXT = "RegionServer,sub=" + METRICS_NAME; - - - String APPEND_TIME = "appendTime"; - String APPEND_TIME_DESC = "Time an append to the log took."; - String APPEND_COUNT = "appendCount"; - String APPEND_COUNT_DESC = "Number of appends to the write ahead log."; - String APPEND_SIZE = "appendSize"; - String APPEND_SIZE_DESC = "Size (in bytes) of the data appended to the WAL."; - String SLOW_APPEND_COUNT = "slowAppendCount"; - String SLOW_APPEND_COUNT_DESC = "Number of appends that were slow."; - String SYNC_TIME = "syncTime"; - String SYNC_TIME_DESC = "The time it took to sync the WAL to HDFS."; - String ROLL_REQUESTED = "rollRequest"; - String ROLL_REQUESTED_DESC = "How many times a log roll has been requested total"; - String LOW_REPLICA_ROLL_REQUESTED = "lowReplicaRollRequest"; - String LOW_REPLICA_ROLL_REQUESTED_DESC = - "How many times a log roll was requested due to too few DN's in the write pipeline."; - - /** - * Add the append size. - */ - void incrementAppendSize(long size); - - /** - * Add the time it took to append. - */ - void incrementAppendTime(long time); - - /** - * Increment the count of wal appends - */ - void incrementAppendCount(); - - /** - * Increment the number of appends that were slow - */ - void incrementSlowAppendCount(); - - /** - * Add the time it took to sync the wal. - */ - void incrementSyncTime(long time); - - void incrementLogRollRequested(); - - void incrementLowReplicationLogRoll(); - - long getSlowAppendCount(); -} diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSinkSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSinkSource.java deleted file mode 100644 index 9fb8415..0000000 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSinkSource.java +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.hbase.replication.regionserver; - -public interface MetricsReplicationSinkSource { - public static final String SINK_AGE_OF_LAST_APPLIED_OP = "sink.ageOfLastAppliedOp"; - public static final String SINK_APPLIED_BATCHES = "sink.appliedBatches"; - public static final String SINK_APPLIED_OPS = "sink.appliedOps"; - public static final String SINK_APPLIED_HFILES = "sink.appliedHFiles"; - - void setLastAppliedOpAge(long age); - void incrAppliedBatches(long batches); - void incrAppliedOps(long batchsize); - long getLastAppliedOpAge(); - void incrAppliedHFiles(long hfileSize); -} diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSource.java deleted file mode 100644 index 6a91701..0000000 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSource.java +++ /dev/null @@ -1,48 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.replication.regionserver; - -import org.apache.hadoop.hbase.metrics.BaseSource; - -/** - * Provides access to gauges and counters. Implementers will hide the details of hadoop1 or - * hadoop2's metrics2 classes and publishing. - */ -public interface MetricsReplicationSource extends BaseSource { - /** - * The name of the metrics - */ - String METRICS_NAME = "Replication"; - - /** - * The name of the metrics context that metrics will be under. - */ - String METRICS_CONTEXT = "regionserver"; - - /** - * The name of the metrics context that metrics will be under. - */ - String METRICS_JMX_CONTEXT = "RegionServer,sub=" + METRICS_NAME; - - /** - * A description. - */ - String METRICS_DESCRIPTION = "Metrics about HBase replication"; - -} diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceFactory.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceFactory.java deleted file mode 100644 index 0e1c5cc..0000000 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceFactory.java +++ /dev/null @@ -1,25 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.replication.regionserver; - -public interface MetricsReplicationSourceFactory { - public MetricsReplicationSinkSource getSink(); - public MetricsReplicationSourceSource getSource(String id); - public MetricsReplicationSourceSource getGlobalSource(); -} diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceSource.java deleted file mode 100644 index 188c3a3..0000000 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceSource.java +++ /dev/null @@ -1,53 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.hbase.replication.regionserver; - -public interface MetricsReplicationSourceSource { - - public static final String SOURCE_SIZE_OF_LOG_QUEUE = "source.sizeOfLogQueue"; - public static final String SOURCE_AGE_OF_LAST_SHIPPED_OP = "source.ageOfLastShippedOp"; - public static final String SOURCE_SHIPPED_BATCHES = "source.shippedBatches"; - - public static final String SOURCE_SHIPPED_KBS = "source.shippedKBs"; - public static final String SOURCE_SHIPPED_OPS = "source.shippedOps"; - - public static final String SOURCE_LOG_READ_IN_BYTES = "source.logReadInBytes"; - public static final String SOURCE_LOG_READ_IN_EDITS = "source.logEditsRead"; - - public static final String SOURCE_LOG_EDITS_FILTERED = "source.logEditsFiltered"; - - public static final String SOURCE_SHIPPED_HFILES = "source.shippedHFiles"; - public static final String SOURCE_SIZE_OF_HFILE_REFS_QUEUE = "source.sizeOfHFileRefsQueue"; - - void setLastShippedAge(long age); - void setSizeOfLogQueue(int size); - void incrSizeOfLogQueue(int size); - void decrSizeOfLogQueue(int size); - void incrLogEditsFiltered(long size); - void incrBatchesShipped(int batches); - void incrOpsShipped(long ops); - void incrShippedKBs(long size); - void incrLogReadInBytes(long size); - void incrLogReadInEdits(long size); - void clear(); - long getLastShippedAge(); - void incrHFilesShipped(long hfiles); - void incrSizeOfHFileRefsQueue(long size); - void decrSizeOfHFileRefsQueue(long size); -} diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/rest/MetricsRESTSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/rest/MetricsRESTSource.java deleted file mode 100644 index 4ecd73b..0000000 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/rest/MetricsRESTSource.java +++ /dev/null @@ -1,116 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.rest; - -import org.apache.hadoop.hbase.metrics.BaseSource; - -/** - * Interface of the Metrics Source that will export data to Hadoop's Metrics2 system. 
- */ -public interface MetricsRESTSource extends BaseSource { - - String METRICS_NAME = "REST"; - - String CONTEXT = "rest"; - - String JMX_CONTEXT = "REST"; - - String METRICS_DESCRIPTION = "Metrics about the HBase REST server"; - - String REQUEST_KEY = "requests"; - - String SUCCESSFUL_GET_KEY = "successfulGet"; - - String SUCCESSFUL_PUT_KEY = "successfulPut"; - - String SUCCESSFUL_DELETE_KEY = "successfulDelete"; - - String FAILED_GET_KEY = "failedGet"; - - String FAILED_PUT_KEY = "failedPut"; - - String FAILED_DELETE_KEY = "failedDelete"; - - String SUCCESSFUL_SCAN_KEY = "successfulScanCount"; - - String FAILED_SCAN_KEY = "failedScanCount"; - - /** - * Increment the number of requests - * - * @param inc Ammount to increment by - */ - void incrementRequests(int inc); - - /** - * Increment the number of successful Get requests. - * - * @param inc Number of successful get requests. - */ - void incrementSucessfulGetRequests(int inc); - - /** - * Increment the number of successful Put requests. - * - * @param inc Number of successful put requests. - */ - void incrementSucessfulPutRequests(int inc); - - /** - * Increment the number of successful Delete requests. - * - * @param inc - */ - void incrementSucessfulDeleteRequests(int inc); - - /** - * Increment the number of failed Put Requests. - * - * @param inc Number of failed Put requests. - */ - void incrementFailedPutRequests(int inc); - - /** - * Increment the number of failed Get requests. - * - * @param inc The number of failed Get Requests. - */ - void incrementFailedGetRequests(int inc); - - /** - * Increment the number of failed Delete requests. - * - * @param inc The number of failed delete requests. - */ - void incrementFailedDeleteRequests(int inc); - - /** - * Increment the number of successful scan requests. - * - * @param inc Number of successful scan requests. - */ - void incrementSucessfulScanRequests(final int inc); - - /** - * Increment the number failed scan requests. - * - * @param inc the inc - */ - void incrementFailedScanRequests(final int inc); -} diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSource.java deleted file mode 100644 index a25df56..0000000 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSource.java +++ /dev/null @@ -1,78 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.thrift; - -import org.apache.hadoop.hbase.metrics.BaseSource; - -/** - * Interface of a class that will export metrics about Thrift to hadoop's metrics2. 
- */ -public interface MetricsThriftServerSource extends BaseSource { - - String BATCH_GET_KEY = "batchGet"; - String BATCH_MUTATE_KEY = "batchMutate"; - String TIME_IN_QUEUE_KEY = "timeInQueue"; - String THRIFT_CALL_KEY = "thriftCall"; - String SLOW_THRIFT_CALL_KEY = "slowThriftCall"; - String CALL_QUEUE_LEN_KEY = "callQueueLen"; - - /** - * Add how long an operation was in the queue. - * @param time - */ - void incTimeInQueue(long time); - - /** - * Set the call queue length. - * @param len Time - */ - void setCallQueueLen(int len); - - /** - * Add how many keys were in a batch get. - * @param diff Num Keys - */ - void incNumRowKeysInBatchGet(int diff); - - /** - * Add how many keys were in a batch mutate. - * @param diff Num Keys - */ - void incNumRowKeysInBatchMutate(int diff); - - /** - * Add how long a method took - * @param name Method name - * @param time Time - */ - void incMethodTime(String name, long time); - - /** - * Add how long a call took - * @param time Time - */ - void incCall(long time); - - /** - * Increment how long a slow call took. - * @param time Time - */ - void incSlowCall(long time); - -} diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceFactory.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceFactory.java deleted file mode 100644 index a4608b5..0000000 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceFactory.java +++ /dev/null @@ -1,37 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.thrift; - -/** Factory that will be used to create metrics sources for the two diffent types of thrift servers. */ -public interface MetricsThriftServerSourceFactory { - - String METRICS_NAME = "Thrift"; - String METRICS_DESCRIPTION = "Thrift Server Metrics"; - String THRIFT_ONE_METRICS_CONTEXT = "thrift-one"; - String THRIFT_ONE_JMX_CONTEXT = "Thrift,sub=ThriftOne"; - String THRIFT_TWO_METRICS_CONTEXT = "thrift-two"; - String THRIFT_TWO_JMX_CONTEXT = "Thrift,sub=ThriftTwo"; - - /** Create a Source for a thrift one server */ - MetricsThriftServerSource createThriftOneSource(); - - /** Create a Source for a thrift two server */ - MetricsThriftServerSource createThriftTwoSource(); - -} diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/MetricHistogram.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/MetricHistogram.java deleted file mode 100644 index b759efb..0000000 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/MetricHistogram.java +++ /dev/null @@ -1,44 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.metrics2; - -/** - * Metrics Histogram interface. Implementing classes will expose computed - * quartile values through the metrics system. - */ -public interface MetricHistogram { - - //Strings used to create metrics names. - String NUM_OPS_METRIC_NAME = "_num_ops"; - String MIN_METRIC_NAME = "_min"; - String MAX_METRIC_NAME = "_max"; - String MEAN_METRIC_NAME = "_mean"; - String MEDIAN_METRIC_NAME = "_median"; - String SEVENTY_FIFTH_PERCENTILE_METRIC_NAME = "_75th_percentile"; - String NINETIETH_PERCENTILE_METRIC_NAME = "_90th_percentile"; - String NINETY_FIFTH_PERCENTILE_METRIC_NAME = "_95th_percentile"; - String NINETY_NINETH_PERCENTILE_METRIC_NAME = "_99th_percentile"; - - /** - * Add a single value to a histogram's stream of values. - * @param value - */ - void add(long value); - -} diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/MetricsExecutor.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/MetricsExecutor.java deleted file mode 100644 index f2ebc94..0000000 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/MetricsExecutor.java +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.metrics2; - -import java.util.concurrent.ScheduledExecutorService; - -/** - * ScheduledExecutorService for metrics. - */ -public interface MetricsExecutor { - - ScheduledExecutorService getExecutor(); - - void stop(); - -} diff --git a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/HadoopShims.java b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/HadoopShims.java deleted file mode 100644 index 157327b..0000000 --- a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/HadoopShims.java +++ /dev/null @@ -1,37 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase; - - -/** - * A compatibility shim layer for interacting with different versions of Hadoop. - */ -//NOTE: we can move this under src/main if main code wants to use this shim layer -public interface HadoopShims { - - /** - * Returns a TaskAttemptContext instance created from the given parameters. - * @param job an instance of o.a.h.mapreduce.Job - * @param taskId an identifier for the task attempt id. Should be parsable by - * TaskAttemptId.forName() - * @return a concrete TaskAttemptContext instance of o.a.h.mapreduce.TaskAttemptContext - */ - T createTestTaskAttemptContext(final J job, final String taskId); - -} diff --git a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/RandomStringGenerator.java b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/RandomStringGenerator.java deleted file mode 100644 index f72843c..0000000 --- a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/RandomStringGenerator.java +++ /dev/null @@ -1,23 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase; - -public interface RandomStringGenerator { - String getRandString(); -} diff --git a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/RandomStringGeneratorImpl.java b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/RandomStringGeneratorImpl.java deleted file mode 100644 index 8e3b71d..0000000 --- a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/RandomStringGeneratorImpl.java +++ /dev/null @@ -1,36 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase; - - -import java.util.UUID; - -public class RandomStringGeneratorImpl implements RandomStringGenerator { - - private final String s; - - public RandomStringGeneratorImpl() { - s = UUID.randomUUID().toString(); - } - - @Override - public String getRandString() { - return s; - } -} diff --git a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/TestCompatibilitySingletonFactory.java b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/TestCompatibilitySingletonFactory.java deleted file mode 100644 index f942059..0000000 --- a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/TestCompatibilitySingletonFactory.java +++ /dev/null @@ -1,89 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase; - -import java.util.ArrayList; -import java.util.List; -import java.util.Random; -import java.util.concurrent.Callable; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.Future; - -import org.apache.hadoop.hbase.testclassification.SmallTests; -import org.apache.hadoop.hbase.testclassification.MetricsTests; - -import org.junit.Test; -import org.junit.experimental.categories.Category; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotEquals; - -@Category({MetricsTests.class, SmallTests.class}) -public class TestCompatibilitySingletonFactory { - - private static final int ITERATIONS = 100000; - private static final Random RANDOM = new Random(); - - private class TestCompatibilitySingletonFactoryCallable implements Callable { - - @Override - public String call() throws Exception { - Thread.sleep(RANDOM.nextInt(10)); - RandomStringGenerator - instance = - CompatibilitySingletonFactory.getInstance(RandomStringGenerator.class); - return instance.getRandString(); - } - } - - @Test - public void testGetInstance() throws Exception { - List callables = - new ArrayList(ITERATIONS); - List resultStrings = new ArrayList(ITERATIONS); - - - // Create the callables. - for (int i = 0; i < ITERATIONS; i++) { - callables.add(new TestCompatibilitySingletonFactoryCallable()); - } - - // Now run the callables. 
- ExecutorService executorService = Executors.newFixedThreadPool(100); - List> futures = executorService.invokeAll(callables); - - // Wait for them all to finish. - for (Future f : futures) { - resultStrings.add(f.get()); - } - - // Get the first string. - String firstString = resultStrings.get(0); - - - // Assert that all the strings are equal to the fist. - for (String s : resultStrings) { - assertEquals(firstString, s); - } - - // an assert to make sure that RandomStringGeneratorImpl is generating random strings. - assertNotEquals(new RandomStringGeneratorImpl().getRandString(), firstString); - } -} diff --git a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/master/TestMetricsMasterSourceFactory.java b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/master/TestMetricsMasterSourceFactory.java deleted file mode 100644 index 91efd39..0000000 --- a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/master/TestMetricsMasterSourceFactory.java +++ /dev/null @@ -1,40 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.master; - -import org.apache.hadoop.hbase.CompatibilitySingletonFactory; -import org.apache.hadoop.hbase.master.MetricsMasterSource; -import org.apache.hadoop.hbase.testclassification.SmallTests; -import org.apache.hadoop.hbase.testclassification.MetricsTests; -import org.junit.Test; -import org.junit.experimental.categories.Category; - -/** - * Test for the CompatibilitySingletonFactory and building MetricsMasterSource - */ -@Category({MetricsTests.class, SmallTests.class}) -public class TestMetricsMasterSourceFactory { - - @Test(expected=RuntimeException.class) - public void testGetInstanceNoHadoopCompat() throws Exception { - //This should throw an exception because there is no compat lib on the class path. - CompatibilitySingletonFactory.getInstance(MetricsMasterSourceFactory.class); - - } -} diff --git a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServerSourceFactory.java b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServerSourceFactory.java deleted file mode 100644 index fbf1994..0000000 --- a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServerSourceFactory.java +++ /dev/null @@ -1,40 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.regionserver; - -import org.apache.hadoop.hbase.CompatibilitySingletonFactory; -import org.apache.hadoop.hbase.regionserver.MetricsRegionServerSourceFactory; -import org.apache.hadoop.hbase.testclassification.SmallTests; -import org.apache.hadoop.hbase.testclassification.MetricsTests; -import org.junit.Test; -import org.junit.experimental.categories.Category; - -/** - * Test for the CompatibilitySingletonFactory and building MetricsRegionServerSource - */ -@Category({MetricsTests.class, SmallTests.class}) -public class TestMetricsRegionServerSourceFactory { - - @Test(expected=RuntimeException.class) - public void testGetInstanceNoHadoopCompat() throws Exception { - //This should throw an exception because there is no compat lib on the class path. - CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class); - - } -} diff --git a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestMetricsWALSource.java b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestMetricsWALSource.java deleted file mode 100644 index aabfdfe..0000000 --- a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestMetricsWALSource.java +++ /dev/null @@ -1,36 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.regionserver.wal; - -import org.apache.hadoop.hbase.CompatibilitySingletonFactory; -import org.apache.hadoop.hbase.testclassification.SmallTests; -import org.apache.hadoop.hbase.testclassification.MetricsTests; -import org.junit.Test; -import org.junit.experimental.categories.Category; - -@Category({MetricsTests.class, SmallTests.class}) -public class TestMetricsWALSource { - - @Test(expected=RuntimeException.class) - public void testGetInstanceNoHadoopCompat() throws Exception { - //This should throw an exception because there is no compat lib on the class path. 
- CompatibilitySingletonFactory.getInstance(MetricsWALSource.class); - - } -} diff --git a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestMetricsReplicationSourceFactory.java b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestMetricsReplicationSourceFactory.java deleted file mode 100644 index 3a8bb89..0000000 --- a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestMetricsReplicationSourceFactory.java +++ /dev/null @@ -1,39 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.replication.regionserver; - -import org.apache.hadoop.hbase.CompatibilitySingletonFactory; -import org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSource; -import org.apache.hadoop.hbase.testclassification.SmallTests; -import org.apache.hadoop.hbase.testclassification.MetricsTests; -import org.junit.Test; -import org.junit.experimental.categories.Category; - -/** - * Test for the CompatibilitySingletonFactory and building MetricsReplicationSource - */ -@Category({MetricsTests.class, SmallTests.class}) -public class TestMetricsReplicationSourceFactory { - - @Test(expected=RuntimeException.class) - public void testGetInstanceNoHadoopCompat() throws Exception { - //This should throw an exception because there is no compat lib on the class path. - CompatibilitySingletonFactory.getInstance(MetricsReplicationSource.class); - } -} diff --git a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/rest/TestMetricsRESTSource.java b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/rest/TestMetricsRESTSource.java deleted file mode 100644 index ee2f164..0000000 --- a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/rest/TestMetricsRESTSource.java +++ /dev/null @@ -1,41 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.hbase.rest; - -import org.apache.hadoop.hbase.CompatibilitySingletonFactory; -import org.apache.hadoop.hbase.rest.MetricsRESTSource; -import org.apache.hadoop.hbase.testclassification.SmallTests; -import org.apache.hadoop.hbase.testclassification.MetricsTests; -import org.junit.Test; -import org.junit.experimental.categories.Category; - -/** - * Test of Rest Metrics Source interface. - */ -@Category({MetricsTests.class, SmallTests.class}) -public class TestMetricsRESTSource { - - - @Test(expected=RuntimeException.class) - public void testGetInstanceNoHadoopCompat() throws Exception { - //This should throw an exception because there is no compat lib on the class path. - CompatibilitySingletonFactory.getInstance(MetricsRESTSource.class); - } - -} diff --git a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/test/MetricsAssertHelper.java b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/test/MetricsAssertHelper.java deleted file mode 100644 index 70f77f1..0000000 --- a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/test/MetricsAssertHelper.java +++ /dev/null @@ -1,171 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.test; - -import org.apache.hadoop.hbase.metrics.BaseSource; - -/** Interface of a class to make assertions about metrics values. */ -public interface MetricsAssertHelper { - - /** - * Init helper. This method will make sure that the metrics system is set - * up for tests. - */ - void init(); - - /** - * Assert that a tag exists and has a given value. - * - * @param name The name of the tag. - * @param expected The expected value - * @param source The BaseSource{@link BaseSource} that will provide the tags, - * gauges, and counters. - */ - void assertTag(String name, String expected, BaseSource source); - - /** - * Assert that a gauge exists and that it's value is equal to the expected value. - * - * @param name The name of the gauge - * @param expected The expected value of the gauge. - * @param source The BaseSource{@link BaseSource} that will provide the tags, - * gauges, and counters. - */ - void assertGauge(String name, long expected, BaseSource source); - - /** - * Assert that a gauge exists and it's value is greater than a given value - * - * @param name The name of the gauge - * @param expected Value that the gauge is expected to be greater than - * @param source The BaseSource{@link BaseSource} that will provide the tags, - * gauges, and counters. 
- */ - void assertGaugeGt(String name, long expected, BaseSource source); - - /** - * Assert that a gauge exists and it's value is less than a given value - * - * @param name The name of the gauge - * @param expected Value that the gauge is expected to be less than - * @param source The BaseSource{@link BaseSource} that will provide the tags, - * gauges, and counters. - */ - void assertGaugeLt(String name, long expected, BaseSource source); - - /** - * Assert that a gauge exists and that it's value is equal to the expected value. - * - * @param name The name of the gauge - * @param expected The expected value of the gauge. - * @param source The BaseSource{@link BaseSource} that will provide the tags, - * gauges, and counters. - */ - void assertGauge(String name, double expected, BaseSource source); - - /** - * Assert that a gauge exists and it's value is greater than a given value - * - * @param name The name of the gauge - * @param expected Value that the gauge is expected to be greater than - * @param source The BaseSource{@link BaseSource} that will provide the tags, - * gauges, and counters. - */ - void assertGaugeGt(String name, double expected, BaseSource source); - - /** - * Assert that a gauge exists and it's value is less than a given value - * - * @param name The name of the gauge - * @param expected Value that the gauge is expected to be less than - * @param source The BaseSource{@link BaseSource} that will provide the tags, - * gauges, and counters. - */ - void assertGaugeLt(String name, double expected, BaseSource source); - - /** - * Assert that a counter exists and that it's value is equal to the expected value. - * - * @param name The name of the counter. - * @param expected The expected value - * @param source The BaseSource{@link BaseSource} that will provide the tags, - * gauges, and counters. - */ - void assertCounter(String name, long expected, BaseSource source); - - /** - * Assert that a counter exists and that it's value is greater than the given value. - * - * @param name The name of the counter. - * @param expected The value the counter is expected to be greater than. - * @param source The BaseSource{@link BaseSource} that will provide the tags, - * gauges, and counters. - */ - void assertCounterGt(String name, long expected, BaseSource source); - - /** - * Assert that a counter exists and that it's value is less than the given value. - * - * @param name The name of the counter. - * @param expected The value the counter is expected to be less than. - * @param source The BaseSource{@link BaseSource} that will provide the tags, - * gauges, and counters. - */ - void assertCounterLt(String name, long expected, BaseSource source); - - /** - * Get the value of a counter. - * - * @param name name of the counter. - * @param source The BaseSource{@link BaseSource} that will provide the tags, - * gauges, and counters. - * @return long value of the counter. - */ - long getCounter(String name, BaseSource source); - - /** - * Check if a dynamic counter exists. - * - * @param name name of the counter. - * @param source The BaseSource{@link BaseSource} that will provide the tags, - * gauges, and counters. - * @return boolean true id counter metric exists. - */ - boolean checkCounterExists(String name, BaseSource source); - - /** - * Get the value of a gauge as a double. - * - * @param name name of the gauge. - * @param source The BaseSource{@link BaseSource} that will provide the tags, - * gauges, and counters. - * @return double value of the gauge. 
- */ - double getGaugeDouble(String name, BaseSource source); - - /** - * Get the value of a gauge as a long. - * - * @param name name of the gauge. - * @param source The BaseSource{@link BaseSource} that will provide the tags, - * gauges, and counters. - * @return long value of the gauge. - */ - long getGaugeLong(String name, BaseSource source); -} diff --git a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/thrift/TestMetricsThriftServerSourceFactory.java b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/thrift/TestMetricsThriftServerSourceFactory.java deleted file mode 100644 index f33135e..0000000 --- a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/thrift/TestMetricsThriftServerSourceFactory.java +++ /dev/null @@ -1,41 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.thrift; - -import org.apache.hadoop.hbase.CompatibilitySingletonFactory; -import org.apache.hadoop.hbase.testclassification.SmallTests; -import org.apache.hadoop.hbase.testclassification.MetricsTests; -import org.apache.hadoop.hbase.thrift.MetricsThriftServerSourceFactory; -import org.junit.Test; -import org.junit.experimental.categories.Category; - -/** - * Test for the interface of MetricsThriftServerSourceFactory - */ -@Category({MetricsTests.class, SmallTests.class}) -public class TestMetricsThriftServerSourceFactory { - - - @Test(expected=RuntimeException.class) - public void testGetInstanceNoHadoopCompat() throws RuntimeException { - //This should throw an exception because there is no compat lib on the class path. - CompatibilitySingletonFactory.getInstance(MetricsThriftServerSourceFactory.class); - } - -} diff --git a/hbase-hadoop-compat/src/test/resources/META-INF/services/org.apache.hadoop.hbase.RandomStringGenerator b/hbase-hadoop-compat/src/test/resources/META-INF/services/org.apache.hadoop.hbase.RandomStringGenerator deleted file mode 100644 index 667f3cc..0000000 --- a/hbase-hadoop-compat/src/test/resources/META-INF/services/org.apache.hadoop.hbase.RandomStringGenerator +++ /dev/null @@ -1,18 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. 
See the License for the -# specific language governing permissions and limitations -# under the License. -# -org.apache.hadoop.hbase.RandomStringGeneratorImpl \ No newline at end of file diff --git a/hbase-hadoop2-compat/pom.xml b/hbase-hadoop2-compat/pom.xml deleted file mode 100644 index 100a297..0000000 --- a/hbase-hadoop2-compat/pom.xml +++ /dev/null @@ -1,216 +0,0 @@ - - - - 4.0.0 - - hbase - org.apache.hbase - 2.0.0-SNAPSHOT - .. - - - hbase-hadoop2-compat - Apache HBase - Hadoop Two Compatibility - - Interfaces to be implemented in order to smooth - over hadoop version differences - - - - - - org.apache.maven.plugins - maven-site-plugin - - true - - - - - org.apache.maven.plugins - maven-source-plugin - - - - maven-assembly-plugin - ${maven.assembly.version} - - true - - - - maven-surefire-plugin - - - - secondPartTestsExecution - test - - test - - - true - - - - - - org.apache.maven.plugins - maven-dependency-plugin - 2.4 - - - create-mrapp-generated-classpath - generate-test-resources - - build-classpath - - - - - ${project.build.directory}/test-classes/mrapp-generated-classpath - - - - - - - - - - - org.eclipse.m2e - lifecycle-mapping - 1.0.0 - - - - - - org.apache.maven.plugins - maven-dependency-plugin - [2.4,) - - build-classpath - - - - - - - - - org.apache.maven.plugins - maven-compiler-plugin - [3.2,) - - compile - - - - - - - - - - - - - - - - - org.apache.hbase - hbase-annotations - - - jdk.tools - jdk.tools - - - - - org.apache.hbase - hbase-annotations - test-jar - test - - - org.apache.hbase - hbase-hadoop-compat - - - org.apache.hbase - hbase-hadoop-compat - ${project.version} - test-jar - test - - - org.apache.commons - commons-math - - - org.apache.hadoop - hadoop-mapreduce-client-core - ${hadoop-two.version} - - - org.apache.hadoop - hadoop-common - ${hadoop-two.version} - - - io.dropwizard.metrics - metrics-core - - - commons-lang - commons-lang - - - commons-logging - commons-logging - - - com.google.guava - guava - - - - - - - skipHadoopTwoCompatTests - - - skipHadoopTwoCompatTests - - - - true - - - - diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSourceFactoryImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSourceFactoryImpl.java deleted file mode 100644 index 4098e26..0000000 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSourceFactoryImpl.java +++ /dev/null @@ -1,62 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.hbase.ipc; - -import java.util.HashMap; - -import org.apache.hadoop.hbase.classification.InterfaceAudience; - -@InterfaceAudience.Private -public class MetricsHBaseServerSourceFactoryImpl extends MetricsHBaseServerSourceFactory { - private enum SourceStorage { - INSTANCE; - HashMap - sources = - new HashMap(); - - } - - @Override - public MetricsHBaseServerSource create(String serverName, MetricsHBaseServerWrapper wrapper) { - return getSource(serverName, wrapper); - } - - private static synchronized MetricsHBaseServerSource getSource(String serverName, - MetricsHBaseServerWrapper wrap) { - String context = createContextName(serverName); - MetricsHBaseServerSource source = SourceStorage.INSTANCE.sources.get(context); - - if (source == null) { - //Create the source. - source = new MetricsHBaseServerSourceImpl( - context, - METRICS_DESCRIPTION, - context.toLowerCase(), - context + METRICS_JMX_CONTEXT_SUFFIX, wrap); - - //Store back in storage - SourceStorage.INSTANCE.sources.put(context, source); - } - - return source; - - } - -} diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSourceImpl.java deleted file mode 100644 index 487f9f5..0000000 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSourceImpl.java +++ /dev/null @@ -1,227 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.hbase.ipc; - -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.metrics.BaseSourceImpl; -import org.apache.hadoop.metrics2.MetricsCollector; -import org.apache.hadoop.metrics2.MetricsRecordBuilder; -import org.apache.hadoop.metrics2.lib.Interns; -import org.apache.hadoop.metrics2.lib.MutableCounterLong; -import org.apache.hadoop.metrics2.lib.MutableHistogram; - -@InterfaceAudience.Private -public class MetricsHBaseServerSourceImpl extends BaseSourceImpl - implements MetricsHBaseServerSource { - - - private final MetricsHBaseServerWrapper wrapper; - private final MutableCounterLong authorizationSuccesses; - private final MutableCounterLong authorizationFailures; - private final MutableCounterLong authenticationSuccesses; - private final MutableCounterLong authenticationFailures; - private final MutableCounterLong authenticationFallbacks; - private final MutableCounterLong sentBytes; - private final MutableCounterLong receivedBytes; - - private final MutableCounterLong exceptions; - private final MutableCounterLong exceptionsOOO; - private final MutableCounterLong exceptionsBusy; - private final MutableCounterLong exceptionsUnknown; - private final MutableCounterLong exceptionsSanity; - private final MutableCounterLong exceptionsNSRE; - private final MutableCounterLong exceptionsMoved; - private final MutableCounterLong exceptionsMultiTooLarge; - - - private MutableHistogram queueCallTime; - private MutableHistogram processCallTime; - private MutableHistogram totalCallTime; - private MutableHistogram requestSize; - private MutableHistogram responseSize; - - public MetricsHBaseServerSourceImpl(String metricsName, - String metricsDescription, - String metricsContext, - String metricsJmxContext, - MetricsHBaseServerWrapper wrapper) { - super(metricsName, metricsDescription, metricsContext, metricsJmxContext); - this.wrapper = wrapper; - - this.authorizationSuccesses = this.getMetricsRegistry().newCounter(AUTHORIZATION_SUCCESSES_NAME, - AUTHORIZATION_SUCCESSES_DESC, 0L); - this.authorizationFailures = this.getMetricsRegistry().newCounter(AUTHORIZATION_FAILURES_NAME, - AUTHORIZATION_FAILURES_DESC, 0L); - - this.exceptions = this.getMetricsRegistry().newCounter(EXCEPTIONS_NAME, EXCEPTIONS_DESC, 0L); - this.exceptionsOOO = this.getMetricsRegistry() - .newCounter(EXCEPTIONS_OOO_NAME, EXCEPTIONS_TYPE_DESC, 0L); - this.exceptionsBusy = this.getMetricsRegistry() - .newCounter(EXCEPTIONS_BUSY_NAME, EXCEPTIONS_TYPE_DESC, 0L); - this.exceptionsUnknown = this.getMetricsRegistry() - .newCounter(EXCEPTIONS_UNKNOWN_NAME, EXCEPTIONS_TYPE_DESC, 0L); - this.exceptionsSanity = this.getMetricsRegistry() - .newCounter(EXCEPTIONS_SANITY_NAME, EXCEPTIONS_TYPE_DESC, 0L); - this.exceptionsMoved = this.getMetricsRegistry() - .newCounter(EXCEPTIONS_MOVED_NAME, EXCEPTIONS_TYPE_DESC, 0L); - this.exceptionsNSRE = this.getMetricsRegistry() - .newCounter(EXCEPTIONS_NSRE_NAME, EXCEPTIONS_TYPE_DESC, 0L); - this.exceptionsMultiTooLarge = this.getMetricsRegistry() - .newCounter(EXCEPTIONS_MULTI_TOO_LARGE_NAME, EXCEPTIONS_MULTI_TOO_LARGE_DESC, 0L); - - this.authenticationSuccesses = this.getMetricsRegistry().newCounter( - AUTHENTICATION_SUCCESSES_NAME, AUTHENTICATION_SUCCESSES_DESC, 0L); - this.authenticationFailures = this.getMetricsRegistry().newCounter(AUTHENTICATION_FAILURES_NAME, - AUTHENTICATION_FAILURES_DESC, 0L); - this.authenticationFallbacks = this.getMetricsRegistry().newCounter( - AUTHENTICATION_FALLBACKS_NAME, 
AUTHENTICATION_FALLBACKS_DESC, 0L); - this.sentBytes = this.getMetricsRegistry().newCounter(SENT_BYTES_NAME, - SENT_BYTES_DESC, 0L); - this.receivedBytes = this.getMetricsRegistry().newCounter(RECEIVED_BYTES_NAME, - RECEIVED_BYTES_DESC, 0L); - this.queueCallTime = this.getMetricsRegistry().newTimeHistogram(QUEUE_CALL_TIME_NAME, - QUEUE_CALL_TIME_DESC); - this.processCallTime = this.getMetricsRegistry().newTimeHistogram(PROCESS_CALL_TIME_NAME, - PROCESS_CALL_TIME_DESC); - this.totalCallTime = this.getMetricsRegistry().newTimeHistogram(TOTAL_CALL_TIME_NAME, - TOTAL_CALL_TIME_DESC); - this.requestSize = this.getMetricsRegistry().newSizeHistogram(REQUEST_SIZE_NAME, - REQUEST_SIZE_DESC); - this.responseSize = this.getMetricsRegistry().newSizeHistogram(RESPONSE_SIZE_NAME, - RESPONSE_SIZE_DESC); - } - - @Override - public void authorizationSuccess() { - authorizationSuccesses.incr(); - } - - @Override - public void authorizationFailure() { - authorizationFailures.incr(); - } - - @Override - public void authenticationFailure() { - authenticationFailures.incr(); - } - - @Override - public void authenticationFallback() { - authenticationFallbacks.incr(); - } - - @Override - public void exception() { - exceptions.incr(); - } - - @Override - public void outOfOrderException() { - exceptionsOOO.incr(); - } - - @Override - public void failedSanityException() { - exceptionsSanity.incr(); - } - - @Override - public void movedRegionException() { - exceptionsMoved.incr(); - } - - @Override - public void notServingRegionException() { - exceptionsNSRE.incr(); - } - - @Override - public void unknownScannerException() { - exceptionsUnknown.incr(); - } - - @Override - public void tooBusyException() { - exceptionsBusy.incr(); - } - - @Override - public void multiActionTooLargeException() { - exceptionsMultiTooLarge.incr(); - } - - @Override - public void authenticationSuccess() { - authenticationSuccesses.incr(); - } - - @Override - public void sentBytes(long count) { - this.sentBytes.incr(count); - } - - @Override - public void receivedBytes(int count) { - this.receivedBytes.incr(count); - } - - @Override - public void sentResponse(long count) { this.responseSize.add(count); } - - @Override - public void receivedRequest(long count) { this.requestSize.add(count); } - - @Override - public void dequeuedCall(int qTime) { - queueCallTime.add(qTime); - } - - @Override - public void processedCall(int processingTime) { - processCallTime.add(processingTime); - } - - @Override - public void queuedAndProcessedCall(int totalTime) { - totalCallTime.add(totalTime); - } - - @Override - public void getMetrics(MetricsCollector metricsCollector, boolean all) { - MetricsRecordBuilder mrb = metricsCollector.addRecord(metricsName); - - if (wrapper != null) { - mrb.addGauge(Interns.info(QUEUE_SIZE_NAME, QUEUE_SIZE_DESC), wrapper.getTotalQueueSize()) - .addGauge(Interns.info(GENERAL_QUEUE_NAME, GENERAL_QUEUE_DESC), - wrapper.getGeneralQueueLength()) - .addGauge(Interns.info(REPLICATION_QUEUE_NAME, - REPLICATION_QUEUE_DESC), wrapper.getReplicationQueueLength()) - .addGauge(Interns.info(PRIORITY_QUEUE_NAME, PRIORITY_QUEUE_DESC), - wrapper.getPriorityQueueLength()) - .addGauge(Interns.info(NUM_OPEN_CONNECTIONS_NAME, - NUM_OPEN_CONNECTIONS_DESC), wrapper.getNumOpenConnections()) - .addGauge(Interns.info(NUM_ACTIVE_HANDLER_NAME, - NUM_ACTIVE_HANDLER_DESC), wrapper.getActiveRpcHandlerCount()); - } - - metricsRegistry.snapshot(mrb, all); - } -} diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/mapreduce/JobUtil.java 
b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/mapreduce/JobUtil.java deleted file mode 100644 index 1623c00..0000000 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/mapreduce/JobUtil.java +++ /dev/null @@ -1,56 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.mapreduce; - -import java.io.IOException; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.classification.InterfaceStability; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.mapreduce.Cluster; -import org.apache.hadoop.mapreduce.JobSubmissionFiles; - -/** - * Utility methods to interact with a job. - */ -@InterfaceAudience.Private -@InterfaceStability.Evolving -public abstract class JobUtil { - private static final Log LOG = LogFactory.getLog(JobUtil.class); - - protected JobUtil() { - super(); - } - - /** - * Initializes the staging directory and returns the path. - * - * @param conf system configuration - * @return staging directory path - * @throws IOException - * @throws InterruptedException - */ - public static Path getStagingDir(Configuration conf) - throws IOException, InterruptedException { - return JobSubmissionFiles.getStagingDir(new Cluster(conf), conf); - } -} diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsAssignmentManagerSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsAssignmentManagerSourceImpl.java deleted file mode 100644 index ccf1c1d..0000000 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsAssignmentManagerSourceImpl.java +++ /dev/null @@ -1,74 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.hbase.master; - -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.metrics.BaseSourceImpl; -import org.apache.hadoop.metrics2.lib.MutableGaugeLong; -import org.apache.hadoop.metrics2.lib.MutableHistogram; - -@InterfaceAudience.Private -public class MetricsAssignmentManagerSourceImpl extends BaseSourceImpl implements MetricsAssignmentManagerSource { - - private MutableGaugeLong ritGauge; - private MutableGaugeLong ritCountOverThresholdGauge; - private MutableGaugeLong ritOldestAgeGauge; - private MutableHistogram assignTimeHisto; - private MutableHistogram bulkAssignTimeHisto; - - public MetricsAssignmentManagerSourceImpl() { - this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT); - } - - public MetricsAssignmentManagerSourceImpl(String metricsName, - String metricsDescription, - String metricsContext, String metricsJmxContext) { - super(metricsName, metricsDescription, metricsContext, metricsJmxContext); - } - - public void init() { - ritGauge = metricsRegistry.newGauge(RIT_COUNT_NAME, "", 0l); - ritCountOverThresholdGauge = metricsRegistry.newGauge(RIT_COUNT_OVER_THRESHOLD_NAME, "", 0l); - ritOldestAgeGauge = metricsRegistry.newGauge(RIT_OLDEST_AGE_NAME, "", 0l); - assignTimeHisto = metricsRegistry.newTimeHistogram(ASSIGN_TIME_NAME); - bulkAssignTimeHisto = metricsRegistry.newTimeHistogram(BULK_ASSIGN_TIME_NAME); - } - - @Override - public void updateAssignmentTime(long time) { - assignTimeHisto.add(time); - } - - @Override - public void updateBulkAssignTime(long time) { - bulkAssignTimeHisto.add(time); - } - - public void setRIT(int ritCount) { - ritGauge.set(ritCount); - } - - public void setRITCountOverThreshold(int ritCount) { - ritCountOverThresholdGauge.set(ritCount); - } - - public void setRITOldestAge(long ritCount) { - ritOldestAgeGauge.set(ritCount); - } -} diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterFilesystemSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterFilesystemSourceImpl.java deleted file mode 100644 index 28414ea..0000000 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterFilesystemSourceImpl.java +++ /dev/null @@ -1,73 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.hbase.master; - -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.metrics.BaseSourceImpl; -import org.apache.hadoop.metrics2.lib.MutableHistogram; - -@InterfaceAudience.Private -public class MetricsMasterFilesystemSourceImpl extends BaseSourceImpl implements MetricsMasterFileSystemSource { - - private MutableHistogram splitSizeHisto; - private MutableHistogram splitTimeHisto; - private MutableHistogram metaSplitTimeHisto; - private MutableHistogram metaSplitSizeHisto; - - public MetricsMasterFilesystemSourceImpl() { - this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT); - } - - public MetricsMasterFilesystemSourceImpl(String metricsName, - String metricsDescription, - String metricsContext, String metricsJmxContext) { - super(metricsName, metricsDescription, metricsContext, metricsJmxContext); - } - - @Override - public void init() { - splitSizeHisto = metricsRegistry.newSizeHistogram(SPLIT_SIZE_NAME, SPLIT_SIZE_DESC); - splitTimeHisto = metricsRegistry.newTimeHistogram(SPLIT_TIME_NAME, SPLIT_TIME_DESC); - metaSplitTimeHisto = - metricsRegistry.newTimeHistogram(META_SPLIT_TIME_NAME, META_SPLIT_TIME_DESC); - metaSplitSizeHisto = - metricsRegistry.newSizeHistogram(META_SPLIT_SIZE_NAME, META_SPLIT_SIZE_DESC); - } - - @Override - public void updateSplitTime(long time) { - splitTimeHisto.add(time); - } - - @Override - public void updateSplitSize(long size) { - splitSizeHisto.add(size); - } - - - @Override - public void updateMetaWALSplitTime(long time) { - metaSplitTimeHisto.add(time); - } - - @Override - public void updateMetaWALSplitSize(long size) { - metaSplitSizeHisto.add(size); - } -} diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterProcSourceFactoryImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterProcSourceFactoryImpl.java deleted file mode 100644 index e4110f6..0000000 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterProcSourceFactoryImpl.java +++ /dev/null @@ -1,38 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.hbase.master; - -import org.apache.hadoop.hbase.classification.InterfaceAudience; - -/** - * Factory to create MetricsMasterProcSource when given a MetricsMasterWrapper - */ -@InterfaceAudience.Private -public class MetricsMasterProcSourceFactoryImpl implements MetricsMasterProcSourceFactory { - - private MetricsMasterProcSource masterProcSource; - - @Override - public synchronized MetricsMasterProcSource create(MetricsMasterWrapper masterWrapper) { - if (masterProcSource == null) { - masterProcSource = new MetricsMasterProcSourceImpl(masterWrapper); - } - return masterProcSource; - } -} diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterProcSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterProcSourceImpl.java deleted file mode 100644 index 0375e37..0000000 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterProcSourceImpl.java +++ /dev/null @@ -1,75 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.master; - -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.metrics.BaseSourceImpl; -import org.apache.hadoop.metrics2.MetricsCollector; -import org.apache.hadoop.metrics2.MetricsRecordBuilder; -import org.apache.hadoop.metrics2.lib.Interns; - -/** - * Hadoop2 implementation of MetricsMasterProcSource. - * - * Implements BaseSource through BaseSourceImpl, following the pattern - */ -@InterfaceAudience.Private -public class MetricsMasterProcSourceImpl - extends BaseSourceImpl implements MetricsMasterProcSource { - - private final MetricsMasterWrapper masterWrapper; - - public MetricsMasterProcSourceImpl(MetricsMasterWrapper masterWrapper) { - this(METRICS_NAME, - METRICS_DESCRIPTION, - METRICS_CONTEXT, - METRICS_JMX_CONTEXT, - masterWrapper); - } - - public MetricsMasterProcSourceImpl(String metricsName, - String metricsDescription, - String metricsContext, - String metricsJmxContext, - MetricsMasterWrapper masterWrapper) { - super(metricsName, metricsDescription, metricsContext, metricsJmxContext); - this.masterWrapper = masterWrapper; - - } - - @Override - public void init() { - super.init(); - } - - @Override - public void getMetrics(MetricsCollector metricsCollector, boolean all) { - MetricsRecordBuilder metricsRecordBuilder = metricsCollector.addRecord(metricsName); - - // masterWrapper can be null because this function is called inside of init.
- if (masterWrapper != null) { - metricsRecordBuilder - .addGauge(Interns.info(NUM_MASTER_WALS_NAME, NUM_MASTER_WALS_DESC), - masterWrapper.getNumWALFiles()); - } - - metricsRegistry.snapshot(metricsRecordBuilder, all); - } - -} diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceFactoryImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceFactoryImpl.java deleted file mode 100644 index c6b31c7..0000000 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceFactoryImpl.java +++ /dev/null @@ -1,40 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.master; - -import org.apache.hadoop.hbase.classification.InterfaceAudience; - -/** - * Factory to create MetricsMasterSource when given a MetricsMasterWrapper - */ -@InterfaceAudience.Private -public class MetricsMasterSourceFactoryImpl implements MetricsMasterSourceFactory { - private static enum FactoryStorage { - INSTANCE; - MetricsMasterSource masterSource; - } - - @Override - public synchronized MetricsMasterSource create(MetricsMasterWrapper masterWrapper) { - if (FactoryStorage.INSTANCE.masterSource == null) { - FactoryStorage.INSTANCE.masterSource = new MetricsMasterSourceImpl(masterWrapper); - } - return FactoryStorage.INSTANCE.masterSource; - } -} diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceImpl.java deleted file mode 100644 index c5ce5e4..0000000 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceImpl.java +++ /dev/null @@ -1,104 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.hbase.master; - -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.metrics.BaseSourceImpl; -import org.apache.hadoop.metrics2.MetricsCollector; -import org.apache.hadoop.metrics2.MetricsRecordBuilder; -import org.apache.hadoop.metrics2.lib.Interns; -import org.apache.hadoop.metrics2.lib.MutableCounterLong; - -/** - * Hadoop2 implementation of MetricsMasterSource. - * - * Implements BaseSource through BaseSourceImpl, following the pattern - */ -@InterfaceAudience.Private -public class MetricsMasterSourceImpl - extends BaseSourceImpl implements MetricsMasterSource { - - private final MetricsMasterWrapper masterWrapper; - private MutableCounterLong clusterRequestsCounter; - - public MetricsMasterSourceImpl(MetricsMasterWrapper masterWrapper) { - this(METRICS_NAME, - METRICS_DESCRIPTION, - METRICS_CONTEXT, - METRICS_JMX_CONTEXT, - masterWrapper); - } - - public MetricsMasterSourceImpl(String metricsName, - String metricsDescription, - String metricsContext, - String metricsJmxContext, - MetricsMasterWrapper masterWrapper) { - super(metricsName, metricsDescription, metricsContext, metricsJmxContext); - this.masterWrapper = masterWrapper; - - } - - @Override - public void init() { - super.init(); - clusterRequestsCounter = metricsRegistry.newCounter(CLUSTER_REQUESTS_NAME, "", 0l); - } - - @Override - public void incRequests(final long inc) { - this.clusterRequestsCounter.incr(inc); - } - - @Override - public void getMetrics(MetricsCollector metricsCollector, boolean all) { - - MetricsRecordBuilder metricsRecordBuilder = metricsCollector.addRecord(metricsName); - - // masterWrapper can be null because this function is called inside of init. - if (masterWrapper != null) { - metricsRecordBuilder - .addGauge(Interns.info(MASTER_ACTIVE_TIME_NAME, - MASTER_ACTIVE_TIME_DESC), masterWrapper.getActiveTime()) - .addGauge(Interns.info(MASTER_START_TIME_NAME, - MASTER_START_TIME_DESC), masterWrapper.getStartTime()) - .addGauge(Interns.info(AVERAGE_LOAD_NAME, AVERAGE_LOAD_DESC), - masterWrapper.getAverageLoad()) - .tag(Interns.info(LIVE_REGION_SERVERS_NAME, LIVE_REGION_SERVERS_DESC), - masterWrapper.getRegionServers()) - .addGauge(Interns.info(NUM_REGION_SERVERS_NAME, - NUMBER_OF_REGION_SERVERS_DESC), masterWrapper.getNumRegionServers()) - .tag(Interns.info(DEAD_REGION_SERVERS_NAME, DEAD_REGION_SERVERS_DESC), - masterWrapper.getDeadRegionServers()) - .addGauge(Interns.info(NUM_DEAD_REGION_SERVERS_NAME, - NUMBER_OF_DEAD_REGION_SERVERS_DESC), - masterWrapper.getNumDeadRegionServers()) - .tag(Interns.info(ZOOKEEPER_QUORUM_NAME, ZOOKEEPER_QUORUM_DESC), - masterWrapper.getZookeeperQuorum()) - .tag(Interns.info(SERVER_NAME_NAME, SERVER_NAME_DESC), masterWrapper.getServerName()) - .tag(Interns.info(CLUSTER_ID_NAME, CLUSTER_ID_DESC), masterWrapper.getClusterId()) - .tag(Interns.info(IS_ACTIVE_MASTER_NAME, - IS_ACTIVE_MASTER_DESC), - String.valueOf(masterWrapper.getIsActiveMaster())); - } - - metricsRegistry.snapshot(metricsRecordBuilder, all); - } - -} diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsSnapshotSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsSnapshotSourceImpl.java deleted file mode 100644 index c2fc6b9..0000000 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsSnapshotSourceImpl.java +++ /dev/null @@ -1,66 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license 
agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.master; - -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.metrics.BaseSourceImpl; -import org.apache.hadoop.metrics2.lib.MutableHistogram; - -@InterfaceAudience.Private -public class MetricsSnapshotSourceImpl extends BaseSourceImpl implements MetricsSnapshotSource { - - private MutableHistogram snapshotTimeHisto; - private MutableHistogram snapshotCloneTimeHisto; - private MutableHistogram snapshotRestoreTimeHisto; - - public MetricsSnapshotSourceImpl() { - this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT); - } - - public MetricsSnapshotSourceImpl(String metricsName, - String metricsDescription, - String metricsContext, String metricsJmxContext) { - super(metricsName, metricsDescription, metricsContext, metricsJmxContext); - } - - @Override - public void init() { - snapshotTimeHisto = metricsRegistry.newTimeHistogram( - SNAPSHOT_TIME_NAME, SNAPSHOT_TIME_DESC); - snapshotCloneTimeHisto = metricsRegistry.newTimeHistogram( - SNAPSHOT_CLONE_TIME_NAME, SNAPSHOT_CLONE_TIME_DESC); - snapshotRestoreTimeHisto = metricsRegistry.newTimeHistogram( - SNAPSHOT_RESTORE_TIME_NAME, SNAPSHOT_RESTORE_TIME_DESC); - } - - @Override - public void updateSnapshotTime(long time) { - snapshotTimeHisto.add(time); - } - - @Override - public void updateSnapshotCloneTime(long time) { - snapshotCloneTimeHisto.add(time); - } - - @Override - public void updateSnapshotRestoreTime(long time) { - snapshotRestoreTimeHisto.add(time); - } -} diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsBalancerSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsBalancerSourceImpl.java deleted file mode 100644 index da34df2..0000000 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsBalancerSourceImpl.java +++ /dev/null @@ -1,59 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.hbase.master.balancer; - -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.metrics.BaseSourceImpl; -import org.apache.hadoop.metrics2.lib.MutableCounterLong; -import org.apache.hadoop.metrics2.lib.MutableHistogram; - -@InterfaceAudience.Private -public class MetricsBalancerSourceImpl extends BaseSourceImpl implements MetricsBalancerSource{ - - private MutableHistogram blanceClusterHisto; - private MutableCounterLong miscCount; - - public MetricsBalancerSourceImpl() { - this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT); - } - - public MetricsBalancerSourceImpl(String metricsName, - String metricsDescription, - String metricsContext, String metricsJmxContext) { - super(metricsName, metricsDescription, metricsContext, metricsJmxContext); - } - - - @Override - public void init() { - blanceClusterHisto = metricsRegistry.newTimeHistogram(BALANCE_CLUSTER); - miscCount = metricsRegistry.newCounter(MISC_INVOATION_COUNT, "", 0L); - - } - - @Override - public void updateBalanceCluster(long time) { - blanceClusterHisto.add(time); - } - - @Override - public void incrMiscInvocations() { - miscCount.incr(); - } -} diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsStochasticBalancerSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsStochasticBalancerSourceImpl.java deleted file mode 100644 index ded0a0c..0000000 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsStochasticBalancerSourceImpl.java +++ /dev/null @@ -1,110 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.hbase.master.balancer; - -import java.util.LinkedHashMap; -import java.util.Map; -import java.util.concurrent.ConcurrentHashMap; - -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.metrics2.MetricsCollector; -import org.apache.hadoop.metrics2.MetricsRecordBuilder; -import org.apache.hadoop.metrics2.lib.Interns; - -@InterfaceAudience.Private -public class MetricsStochasticBalancerSourceImpl extends MetricsBalancerSourceImpl implements - MetricsStochasticBalancerSource { - private static final String TABLE_FUNCTION_SEP = "_"; - - // Most Recently Used(MRU) cache - private static final float MRU_LOAD_FACTOR = 0.75f; - private int metricsSize = 1000; - private int mruCap = calcMruCap(metricsSize); - - private Map<String, Map<String, Double>> stochasticCosts = - new LinkedHashMap<String, Map<String, Double>>(mruCap, MRU_LOAD_FACTOR, true) { - private static final long serialVersionUID = 8204713453436906599L; - - @Override - protected boolean removeEldestEntry(Map.Entry<String, Map<String, Double>> eldest) { - return size() > mruCap; - } - }; - private Map<String, String> costFunctionDescs = new ConcurrentHashMap<String, String>(); - - /** - * Calculates the mru cache capacity from the metrics size - */ - private static int calcMruCap(int metricsSize) { - return (int) Math.ceil(metricsSize / MRU_LOAD_FACTOR) + 1; - } - - @Override - public void updateMetricsSize(int size) { - if (size > 0) { - metricsSize = size; - mruCap = calcMruCap(size); - } - } - - /** - * Reports stochastic load balancer costs to JMX - */ - public void updateStochasticCost(String tableName, String costFunctionName, String functionDesc, - Double cost) { - if (tableName == null || costFunctionName == null || cost == null) { - return; - } - - if (functionDesc != null) { - costFunctionDescs.put(costFunctionName, functionDesc); - } - - synchronized (stochasticCosts) { - Map<String, Double> costs = stochasticCosts.get(tableName); - if (costs == null) { - costs = new ConcurrentHashMap<String, Double>(); - } - - costs.put(costFunctionName, cost); - stochasticCosts.put(tableName, costs); - } - } - - @Override - public void getMetrics(MetricsCollector metricsCollector, boolean all) { - MetricsRecordBuilder metricsRecordBuilder = metricsCollector.addRecord(metricsName); - - if (stochasticCosts != null) { - synchronized (stochasticCosts) { - for (Map.Entry<String, Map<String, Double>> tableEntry : stochasticCosts.entrySet()) { - for (Map.Entry<String, Double> costEntry : tableEntry.getValue().entrySet()) { - String attrName = tableEntry.getKey() + TABLE_FUNCTION_SEP + costEntry.getKey(); - Double cost = costEntry.getValue(); - String functionDesc = costFunctionDescs.get(costEntry.getKey()); - if (functionDesc == null) functionDesc = costEntry.getKey(); - metricsRecordBuilder.addGauge(Interns.info(attrName, functionDesc), cost); - } - } - } - } - metricsRegistry.snapshot(metricsRecordBuilder, all); - } - -} diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseSourceImpl.java deleted file mode 100644 index 6756a21..0000000 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseSourceImpl.java +++ /dev/null @@ -1,176 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License.
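The stochasticCosts map deleted above is a bounded MRU cache: an access-ordered LinkedHashMap promotes entries on get, removeEldestEntry evicts the least recently used entry once the cap is exceeded, and sizing the initial capacity to ceil(cap / loadFactor) + 1 (what calcMruCap computes) keeps the map from rehashing before eviction starts. A minimal standalone sketch of that idiom, with illustrative names that are not part of this patch:

import java.util.LinkedHashMap;
import java.util.Map;

public class MruCacheSketch {
  private static final float LOAD_FACTOR = 0.75f;
  private static final int CAP = 3; // tiny cap so eviction is visible

  public static void main(String[] args) {
    // accessOrder=true: iteration order runs least- to most-recently used.
    Map<String, Double> cache =
        new LinkedHashMap<String, Double>((int) Math.ceil(CAP / LOAD_FACTOR) + 1,
            LOAD_FACTOR, true) {
          @Override
          protected boolean removeEldestEntry(Map.Entry<String, Double> eldest) {
            return size() > CAP; // evict once the cap is exceeded
          }
        };

    cache.put("t1", 0.1);
    cache.put("t2", 0.2);
    cache.put("t3", 0.3);
    cache.get("t1");        // touch t1 so t2 becomes the eldest
    cache.put("t4", 0.4);   // evicts t2, not t1
    System.out.println(cache.keySet()); // prints [t3, t1, t4]
  }
}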
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.metrics; - -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.metrics2.MetricsCollector; -import org.apache.hadoop.metrics2.MetricsSource; -import org.apache.hadoop.metrics2.impl.JmxCacheBuster; -import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; -import org.apache.hadoop.metrics2.lib.DynamicMetricsRegistry; -import org.apache.hadoop.metrics2.lib.MetricMutableQuantiles; -import org.apache.hadoop.metrics2.lib.MutableCounterLong; -import org.apache.hadoop.metrics2.lib.MutableGaugeLong; -import org.apache.hadoop.metrics2.lib.MutableHistogram; -import org.apache.hadoop.metrics2.source.JvmMetrics; - -/** - * Hadoop 2 implementation of BaseSource (using metrics2 framework). It handles registration to - * DefaultMetricsSystem and creation of the metrics registry. - * - * All MetricsSource's in hbase-hadoop2-compat should derive from this class. - */ -@InterfaceAudience.Private -public class BaseSourceImpl implements BaseSource, MetricsSource { - - private static enum DefaultMetricsSystemInitializer { - INSTANCE; - private boolean inited = false; - - synchronized void init(String name) { - if (inited) return; - inited = true; - DefaultMetricsSystem.initialize(HBASE_METRICS_SYSTEM_NAME); - JvmMetrics.initSingleton(name, ""); - } - } - - protected final DynamicMetricsRegistry metricsRegistry; - protected final String metricsName; - protected final String metricsDescription; - protected final String metricsContext; - protected final String metricsJmxContext; - - public BaseSourceImpl( - String metricsName, - String metricsDescription, - String metricsContext, - String metricsJmxContext) { - - this.metricsName = metricsName; - this.metricsDescription = metricsDescription; - this.metricsContext = metricsContext; - this.metricsJmxContext = metricsJmxContext; - - metricsRegistry = new DynamicMetricsRegistry(metricsName).setContext(metricsContext); - DefaultMetricsSystemInitializer.INSTANCE.init(metricsName); - - //Register this instance. - DefaultMetricsSystem.instance().register(metricsJmxContext, metricsDescription, this); - init(); - - } - - public void init() { - this.metricsRegistry.clearMetrics(); - } - - /** - * Set a single gauge to a value. - * - * @param gaugeName gauge name - * @param value the new value of the gauge. - */ - public void setGauge(String gaugeName, long value) { - MutableGaugeLong gaugeInt = metricsRegistry.getLongGauge(gaugeName, value); - gaugeInt.set(value); - } - - /** - * Add some amount to a gauge. - * - * @param gaugeName The name of the gauge to increment. - * @param delta The amount to increment the gauge by. - */ - public void incGauge(String gaugeName, long delta) { - MutableGaugeLong gaugeInt = metricsRegistry.getLongGauge(gaugeName, 0l); - gaugeInt.incr(delta); - } - - /** - * Decrease the value of a named gauge. - * - * @param gaugeName The name of the gauge. - * @param delta the amount to subtract from a gauge value.
- */ - public void decGauge(String gaugeName, long delta) { - MutableGaugeLong gaugeInt = metricsRegistry.getLongGauge(gaugeName, 0l); - gaugeInt.decr(delta); - } - - /** - * Increment a named counter by some value. - * - * @param key the name of the counter - * @param delta the amount to increment - */ - public void incCounters(String key, long delta) { - MutableCounterLong counter = metricsRegistry.getLongCounter(key, 0l); - counter.incr(delta); - - } - - @Override - public void updateHistogram(String name, long value) { - MutableHistogram histo = metricsRegistry.getHistogram(name); - histo.add(value); - } - - @Override - public void updateQuantile(String name, long value) { - MetricMutableQuantiles histo = metricsRegistry.getQuantile(name); - histo.add(value); - } - - /** - * Remove a named gauge. - * - * @param key - */ - public void removeMetric(String key) { - metricsRegistry.removeMetric(key); - JmxCacheBuster.clearJmxCache(); - } - - @Override - public void getMetrics(MetricsCollector metricsCollector, boolean all) { - metricsRegistry.snapshot(metricsCollector.addRecord(metricsRegistry.info()), all); - } - - public DynamicMetricsRegistry getMetricsRegistry() { - return metricsRegistry; - } - - public String getMetricsContext() { - return metricsContext; - } - - public String getMetricsDescription() { - return metricsDescription; - } - - public String getMetricsJmxContext() { - return metricsJmxContext; - } - - public String getMetricsName() { - return metricsName; - } - -} diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/metrics/MBeanSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/metrics/MBeanSourceImpl.java deleted file mode 100644 index c12700f..0000000 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/metrics/MBeanSourceImpl.java +++ /dev/null @@ -1,43 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.metrics; - -import javax.management.ObjectName; - -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.metrics2.util.MBeans; - -/** - * Hadoop2 metrics2 implementation of an object that registers MBeans.
- */ -@InterfaceAudience.Private -public class MBeanSourceImpl implements MBeanSource { - - /** - * Register an mbean with the underlying metrics system - * @param serviceName Metrics service/system name - * @param metricsName name of the metrics object to expose - * @param theMbean the actual MBean - * @return ObjectName from jmx - */ - @Override - public ObjectName register(String serviceName, String metricsName, Object theMbean) { - return MBeans.register(serviceName, metricsName, theMbean); - } -} diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionAggregateSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionAggregateSourceImpl.java deleted file mode 100644 index 1835f6b..0000000 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionAggregateSourceImpl.java +++ /dev/null @@ -1,112 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.regionserver; - -import java.util.Collections; -import java.util.Set; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.TimeUnit; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.metrics.BaseSourceImpl; -import org.apache.hadoop.metrics2.MetricsCollector; -import org.apache.hadoop.metrics2.MetricsRecordBuilder; -import org.apache.hadoop.metrics2.impl.JmxCacheBuster; -import org.apache.hadoop.metrics2.lib.Interns; -import org.apache.hadoop.metrics2.lib.MetricsExecutorImpl; - -@InterfaceAudience.Private -public class MetricsRegionAggregateSourceImpl extends BaseSourceImpl - implements MetricsRegionAggregateSource { - - private static final Log LOG = LogFactory.getLog(MetricsRegionAggregateSourceImpl.class); - - private final MetricsExecutorImpl executor = new MetricsExecutorImpl(); - - private final Set<MetricsRegionSource> regionSources = - Collections.newSetFromMap(new ConcurrentHashMap<MetricsRegionSource, Boolean>()); - - public MetricsRegionAggregateSourceImpl() { - this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT); - } - - - public MetricsRegionAggregateSourceImpl(String metricsName, - String metricsDescription, - String metricsContext, - String metricsJmxContext) { - super(metricsName, metricsDescription, metricsContext, metricsJmxContext); - - // Every few mins clean the JMX cache.
- executor.getExecutor().scheduleWithFixedDelay(new Runnable() { - public void run() { - JmxCacheBuster.clearJmxCache(); - } - }, 5, 5, TimeUnit.MINUTES); - } - - @Override - public void register(MetricsRegionSource source) { - regionSources.add(source); - clearCache(); - } - - @Override - public void deregister(MetricsRegionSource toRemove) { - try { - regionSources.remove(toRemove); - } catch (Exception e) { - // Ignored. If this errors out it means that someone is double - // closing the region source and the region is already nulled out. - LOG.info( - "Error trying to remove " + toRemove + " from " + this.getClass().getSimpleName(), - e); - } - clearCache(); - } - - private synchronized void clearCache() { - JmxCacheBuster.clearJmxCache(); - } - - /** - * Yes this is a get function that doesn't return anything. Thanks Hadoop for breaking all - * expectations of java programmers. Instead of returning anything Hadoop metrics expects - * getMetrics to push the metrics into the collector. - * - * @param collector the collector - * @param all get all the metrics regardless of when they last changed. - */ - @Override - public void getMetrics(MetricsCollector collector, boolean all) { - MetricsRecordBuilder mrb = collector.addRecord(metricsName); - - if (regionSources != null) { - for (MetricsRegionSource regionMetricSource : regionSources) { - if (regionMetricSource instanceof MetricsRegionSourceImpl) { - ((MetricsRegionSourceImpl) regionMetricSource).snapshot(mrb, all); - } - } - mrb.addGauge(Interns.info(NUM_REGIONS, NUMBER_OF_REGIONS_DESC), regionSources.size()); - metricsRegistry.snapshot(mrb, all); - } - } -} diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceFactoryImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceFactoryImpl.java deleted file mode 100644 index c483083..0000000 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceFactoryImpl.java +++ /dev/null @@ -1,53 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.hbase.regionserver; - -import org.apache.hadoop.hbase.classification.InterfaceAudience; - -/** - * Factory to create MetricsRegionServerSource when given a MetricsRegionServerWrapper - */ -@InterfaceAudience.Private -public class MetricsRegionServerSourceFactoryImpl implements MetricsRegionServerSourceFactory { - public static enum FactoryStorage { - INSTANCE; - private Object aggLock = new Object(); - private MetricsRegionAggregateSourceImpl aggImpl; - } - - private synchronized MetricsRegionAggregateSourceImpl getAggregate() { - synchronized (FactoryStorage.INSTANCE.aggLock) { - if (FactoryStorage.INSTANCE.aggImpl == null) { - FactoryStorage.INSTANCE.aggImpl = new MetricsRegionAggregateSourceImpl(); - } - return FactoryStorage.INSTANCE.aggImpl; - } - } - - - @Override - public synchronized MetricsRegionServerSource createServer(MetricsRegionServerWrapper regionServerWrapper) { - return new MetricsRegionServerSourceImpl(regionServerWrapper); - } - - @Override - public MetricsRegionSource createRegion(MetricsRegionWrapper wrapper) { - return new MetricsRegionSourceImpl(wrapper, getAggregate()); - } -} diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java deleted file mode 100644 index f40811c..0000000 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java +++ /dev/null @@ -1,318 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.regionserver; - -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.metrics.BaseSourceImpl; -import org.apache.hadoop.metrics2.MetricHistogram; -import org.apache.hadoop.metrics2.MetricsCollector; -import org.apache.hadoop.metrics2.MetricsRecordBuilder; -import org.apache.hadoop.metrics2.lib.Interns; -import org.apache.hadoop.metrics2.lib.MutableCounterLong; - -/** - * Hadoop2 implementation of MetricsRegionServerSource. 
- * - * Implements BaseSource through BaseSourceImpl, following the pattern - */ -@InterfaceAudience.Private -public class MetricsRegionServerSourceImpl - extends BaseSourceImpl implements MetricsRegionServerSource { - - - final MetricsRegionServerWrapper rsWrap; - private final MetricHistogram putHisto; - private final MetricHistogram deleteHisto; - private final MetricHistogram getHisto; - private final MetricHistogram incrementHisto; - private final MetricHistogram appendHisto; - private final MetricHistogram replayHisto; - private final MetricHistogram scanNextHisto; - - private final MutableCounterLong slowPut; - private final MutableCounterLong slowDelete; - private final MutableCounterLong slowGet; - private final MutableCounterLong slowIncrement; - private final MutableCounterLong slowAppend; - private final MutableCounterLong splitRequest; - private final MutableCounterLong splitSuccess; - - private final MetricHistogram splitTimeHisto; - private final MetricHistogram flushTimeHisto; - - public MetricsRegionServerSourceImpl(MetricsRegionServerWrapper rsWrap) { - this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT, rsWrap); - } - - public MetricsRegionServerSourceImpl(String metricsName, - String metricsDescription, - String metricsContext, - String metricsJmxContext, - MetricsRegionServerWrapper rsWrap) { - super(metricsName, metricsDescription, metricsContext, metricsJmxContext); - this.rsWrap = rsWrap; - - putHisto = getMetricsRegistry().newTimeHistogram(MUTATE_KEY); - slowPut = getMetricsRegistry().newCounter(SLOW_MUTATE_KEY, SLOW_MUTATE_DESC, 0L); - - deleteHisto = getMetricsRegistry().newTimeHistogram(DELETE_KEY); - slowDelete = getMetricsRegistry().newCounter(SLOW_DELETE_KEY, SLOW_DELETE_DESC, 0L); - - getHisto = getMetricsRegistry().newTimeHistogram(GET_KEY); - slowGet = getMetricsRegistry().newCounter(SLOW_GET_KEY, SLOW_GET_DESC, 0L); - - incrementHisto = getMetricsRegistry().newTimeHistogram(INCREMENT_KEY); - slowIncrement = getMetricsRegistry().newCounter(SLOW_INCREMENT_KEY, SLOW_INCREMENT_DESC, 0L); - - appendHisto = getMetricsRegistry().newTimeHistogram(APPEND_KEY); - slowAppend = getMetricsRegistry().newCounter(SLOW_APPEND_KEY, SLOW_APPEND_DESC, 0L); - - replayHisto = getMetricsRegistry().newTimeHistogram(REPLAY_KEY); - scanNextHisto = getMetricsRegistry().newTimeHistogram(SCAN_NEXT_KEY); - - splitTimeHisto = getMetricsRegistry().newTimeHistogram(SPLIT_KEY); - flushTimeHisto = getMetricsRegistry().newTimeHistogram(FLUSH_KEY); - - splitRequest = getMetricsRegistry().newCounter(SPLIT_REQUEST_KEY, SPLIT_REQUEST_DESC, 0L); - splitSuccess = getMetricsRegistry().newCounter(SPLIT_SUCCESS_KEY, SPLIT_SUCCESS_DESC, 0L); - } - - @Override - public void updatePut(long t) { - putHisto.add(t); - } - - @Override - public void updateDelete(long t) { - deleteHisto.add(t); - } - - @Override - public void updateGet(long t) { - getHisto.add(t); - } - - @Override - public void updateIncrement(long t) { - incrementHisto.add(t); - } - - @Override - public void updateAppend(long t) { - appendHisto.add(t); - } - - @Override - public void updateReplay(long t) { - replayHisto.add(t); - } - - @Override - public void updateScannerNext(long scanSize) { - scanNextHisto.add(scanSize); - } - - @Override - public void incrSlowPut() { - slowPut.incr(); - } - - @Override - public void incrSlowDelete() { - slowDelete.incr(); - } - - @Override - public void incrSlowGet() { - slowGet.incr(); - } - - @Override - public void incrSlowIncrement() { - slowIncrement.incr(); - } - - 
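The region server source deleted here pairs every operation with a time histogram plus a plain counter for slow outliers (putHisto/slowPut and so on), so hot-path updates stay a single add or incr and snapshotting is deferred to getMetrics. A minimal sketch of that pairing built only on stock Hadoop metrics2 classes; MutableRate stands in for the MutableHistogram class this patch removes, and all names and the 1000 ms threshold are illustrative:

import org.apache.hadoop.metrics2.MetricsCollector;
import org.apache.hadoop.metrics2.MetricsSource;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.metrics2.lib.MetricsRegistry;
import org.apache.hadoop.metrics2.lib.MutableCounterLong;
import org.apache.hadoop.metrics2.lib.MutableRate;

public class PutMetricsSketch implements MetricsSource {
  private static final long SLOW_MS = 1000;

  private final MetricsRegistry registry = new MetricsRegistry("PutSketch");
  private final MutableRate putTime =
      registry.newRate("putTime", "put latency", false);
  private final MutableCounterLong slowPuts =
      registry.newCounter("slowPutCount", "puts over " + SLOW_MS + "ms", 0L);

  public void updatePut(long millis) {
    putTime.add(millis);   // cheap hot-path update
    if (millis > SLOW_MS) {
      slowPuts.incr();     // outliers counted separately
    }
  }

  @Override
  public void getMetrics(MetricsCollector collector, boolean all) {
    // metrics2 is pull-based: the system calls this and the source
    // pushes a snapshot of its registry into the collector.
    registry.snapshot(collector.addRecord("PutSketch"), all);
  }

  public static void main(String[] args) {
    DefaultMetricsSystem.initialize("sketch");
    PutMetricsSketch source = new PutMetricsSketch();
    DefaultMetricsSystem.instance().register("PutSketch", "put metrics sketch", source);
    source.updatePut(3);
    source.updatePut(1500); // counted as a slow put
  }
}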
@Override - public void incrSlowAppend() { - slowAppend.incr(); - } - - @Override - public void incrSplitRequest() { - splitRequest.incr(); - } - - @Override - public void incrSplitSuccess() { - splitSuccess.incr(); - } - - @Override - public void updateSplitTime(long t) { - splitTimeHisto.add(t); - } - - @Override - public void updateFlushTime(long t) { - flushTimeHisto.add(t); - } - - /** - * Yes this is a get function that doesn't return anything. Thanks Hadoop for breaking all - * expectations of java programmers. Instead of returning anything Hadoop metrics expects - * getMetrics to push the metrics into the collector. - * - * @param metricsCollector Collector to accept metrics - * @param all push all or only changed? - */ - @Override - public void getMetrics(MetricsCollector metricsCollector, boolean all) { - - MetricsRecordBuilder mrb = metricsCollector.addRecord(metricsName); - - // rsWrap can be null because this function is called inside of init. - if (rsWrap != null) { - mrb.addGauge(Interns.info(REGION_COUNT, REGION_COUNT_DESC), rsWrap.getNumOnlineRegions()) - .addGauge(Interns.info(STORE_COUNT, STORE_COUNT_DESC), rsWrap.getNumStores()) - .addGauge(Interns.info(WALFILE_COUNT, WALFILE_COUNT_DESC), rsWrap.getNumWALFiles()) - .addGauge(Interns.info(WALFILE_SIZE, WALFILE_SIZE_DESC), rsWrap.getWALFileSize()) - .addGauge(Interns.info(STOREFILE_COUNT, STOREFILE_COUNT_DESC), rsWrap.getNumStoreFiles()) - .addGauge(Interns.info(MEMSTORE_SIZE, MEMSTORE_SIZE_DESC), rsWrap.getMemstoreSize()) - .addGauge(Interns.info(STOREFILE_SIZE, STOREFILE_SIZE_DESC), rsWrap.getStoreFileSize()) - .addGauge(Interns.info(RS_START_TIME_NAME, RS_START_TIME_DESC), - rsWrap.getStartCode()) - .addCounter(Interns.info(TOTAL_REQUEST_COUNT, TOTAL_REQUEST_COUNT_DESC), - rsWrap.getTotalRequestCount()) - .addCounter(Interns.info(READ_REQUEST_COUNT, READ_REQUEST_COUNT_DESC), - rsWrap.getReadRequestsCount()) - .addCounter(Interns.info(WRITE_REQUEST_COUNT, WRITE_REQUEST_COUNT_DESC), - rsWrap.getWriteRequestsCount()) - .addCounter(Interns.info(CHECK_MUTATE_FAILED_COUNT, CHECK_MUTATE_FAILED_COUNT_DESC), - rsWrap.getCheckAndMutateChecksFailed()) - .addCounter(Interns.info(CHECK_MUTATE_PASSED_COUNT, CHECK_MUTATE_PASSED_COUNT_DESC), - rsWrap.getCheckAndMutateChecksPassed()) - .addGauge(Interns.info(STOREFILE_INDEX_SIZE, STOREFILE_INDEX_SIZE_DESC), - rsWrap.getStoreFileIndexSize()) - .addGauge(Interns.info(STATIC_INDEX_SIZE, STATIC_INDEX_SIZE_DESC), - rsWrap.getTotalStaticIndexSize()) - .addGauge(Interns.info(STATIC_BLOOM_SIZE, STATIC_BLOOM_SIZE_DESC), - rsWrap.getTotalStaticBloomSize()) - .addGauge( - Interns.info(NUMBER_OF_MUTATIONS_WITHOUT_WAL, NUMBER_OF_MUTATIONS_WITHOUT_WAL_DESC), - rsWrap.getNumMutationsWithoutWAL()) - .addGauge(Interns.info(DATA_SIZE_WITHOUT_WAL, DATA_SIZE_WITHOUT_WAL_DESC), - rsWrap.getDataInMemoryWithoutWAL()) - .addGauge(Interns.info(PERCENT_FILES_LOCAL, PERCENT_FILES_LOCAL_DESC), - rsWrap.getPercentFileLocal()) - .addGauge(Interns.info(PERCENT_FILES_LOCAL_SECONDARY_REGIONS, - PERCENT_FILES_LOCAL_SECONDARY_REGIONS_DESC), - rsWrap.getPercentFileLocalSecondaryRegions()) - .addGauge(Interns.info(SPLIT_QUEUE_LENGTH, SPLIT_QUEUE_LENGTH_DESC), - rsWrap.getSplitQueueSize()) - .addGauge(Interns.info(COMPACTION_QUEUE_LENGTH, COMPACTION_QUEUE_LENGTH_DESC), - rsWrap.getCompactionQueueSize()) - .addGauge(Interns.info(FLUSH_QUEUE_LENGTH, FLUSH_QUEUE_LENGTH_DESC), - rsWrap.getFlushQueueSize()) - .addGauge(Interns.info(BLOCK_CACHE_FREE_SIZE, BLOCK_CACHE_FREE_DESC), - rsWrap.getBlockCacheFreeSize()) - 
.addGauge(Interns.info(BLOCK_CACHE_COUNT, BLOCK_CACHE_COUNT_DESC), - rsWrap.getBlockCacheCount()) - .addGauge(Interns.info(BLOCK_CACHE_SIZE, BLOCK_CACHE_SIZE_DESC), - rsWrap.getBlockCacheSize()) - .addCounter(Interns.info(BLOCK_CACHE_HIT_COUNT, BLOCK_CACHE_HIT_COUNT_DESC), - rsWrap.getBlockCacheHitCount()) - .addCounter(Interns.info(BLOCK_CACHE_PRIMARY_HIT_COUNT, - BLOCK_CACHE_PRIMARY_HIT_COUNT_DESC), rsWrap.getBlockCachePrimaryHitCount()) - .addCounter(Interns.info(BLOCK_CACHE_MISS_COUNT, BLOCK_COUNT_MISS_COUNT_DESC), - rsWrap.getBlockCacheMissCount()) - .addCounter(Interns.info(BLOCK_CACHE_PRIMARY_MISS_COUNT, - BLOCK_COUNT_PRIMARY_MISS_COUNT_DESC), rsWrap.getBlockCachePrimaryMissCount()) - .addCounter(Interns.info(BLOCK_CACHE_EVICTION_COUNT, BLOCK_CACHE_EVICTION_COUNT_DESC), - rsWrap.getBlockCacheEvictedCount()) - .addCounter(Interns.info(BLOCK_CACHE_PRIMARY_EVICTION_COUNT, - BLOCK_CACHE_PRIMARY_EVICTION_COUNT_DESC), rsWrap.getBlockCachePrimaryEvictedCount()) - .addGauge(Interns.info(BLOCK_CACHE_HIT_PERCENT, BLOCK_CACHE_HIT_PERCENT_DESC), - rsWrap.getBlockCacheHitPercent()) - .addGauge(Interns.info(BLOCK_CACHE_EXPRESS_HIT_PERCENT, - BLOCK_CACHE_EXPRESS_HIT_PERCENT_DESC), rsWrap.getBlockCacheHitCachingPercent()) - .addCounter(Interns.info(BLOCK_CACHE_FAILED_INSERTION_COUNT, - BLOCK_CACHE_FAILED_INSERTION_COUNT_DESC),rsWrap.getBlockCacheFailedInsertions()) - .addCounter(Interns.info(UPDATES_BLOCKED_TIME, UPDATES_BLOCKED_DESC), - rsWrap.getUpdatesBlockedTime()) - .addCounter(Interns.info(FLUSHED_CELLS, FLUSHED_CELLS_DESC), - rsWrap.getFlushedCellsCount()) - .addCounter(Interns.info(COMPACTED_CELLS, COMPACTED_CELLS_DESC), - rsWrap.getCompactedCellsCount()) - .addCounter(Interns.info(MAJOR_COMPACTED_CELLS, MAJOR_COMPACTED_CELLS_DESC), - rsWrap.getMajorCompactedCellsCount()) - .addCounter(Interns.info(FLUSHED_CELLS_SIZE, FLUSHED_CELLS_SIZE_DESC), - rsWrap.getFlushedCellsSize()) - .addCounter(Interns.info(COMPACTED_CELLS_SIZE, COMPACTED_CELLS_SIZE_DESC), - rsWrap.getCompactedCellsSize()) - .addCounter(Interns.info(MAJOR_COMPACTED_CELLS_SIZE, MAJOR_COMPACTED_CELLS_SIZE_DESC), - rsWrap.getMajorCompactedCellsSize()) - - .addCounter( - Interns.info(CELLS_COUNT_COMPACTED_FROM_MOB, CELLS_COUNT_COMPACTED_FROM_MOB_DESC), - rsWrap.getCellsCountCompactedFromMob()) - .addCounter(Interns.info(CELLS_COUNT_COMPACTED_TO_MOB, CELLS_COUNT_COMPACTED_TO_MOB_DESC), - rsWrap.getCellsCountCompactedToMob()) - .addCounter( - Interns.info(CELLS_SIZE_COMPACTED_FROM_MOB, CELLS_SIZE_COMPACTED_FROM_MOB_DESC), - rsWrap.getCellsSizeCompactedFromMob()) - .addCounter(Interns.info(CELLS_SIZE_COMPACTED_TO_MOB, CELLS_SIZE_COMPACTED_TO_MOB_DESC), - rsWrap.getCellsSizeCompactedToMob()) - .addCounter(Interns.info(MOB_FLUSH_COUNT, MOB_FLUSH_COUNT_DESC), - rsWrap.getMobFlushCount()) - .addCounter(Interns.info(MOB_FLUSHED_CELLS_COUNT, MOB_FLUSHED_CELLS_COUNT_DESC), - rsWrap.getMobFlushedCellsCount()) - .addCounter(Interns.info(MOB_FLUSHED_CELLS_SIZE, MOB_FLUSHED_CELLS_SIZE_DESC), - rsWrap.getMobFlushedCellsSize()) - .addCounter(Interns.info(MOB_SCAN_CELLS_COUNT, MOB_SCAN_CELLS_COUNT_DESC), - rsWrap.getMobScanCellsCount()) - .addCounter(Interns.info(MOB_SCAN_CELLS_SIZE, MOB_SCAN_CELLS_SIZE_DESC), - rsWrap.getMobScanCellsSize()) - .addGauge(Interns.info(MOB_FILE_CACHE_COUNT, MOB_FILE_CACHE_COUNT_DESC), - rsWrap.getMobFileCacheCount()) - .addCounter(Interns.info(MOB_FILE_CACHE_ACCESS_COUNT, MOB_FILE_CACHE_ACCESS_COUNT_DESC), - rsWrap.getMobFileCacheAccessCount()) - .addCounter(Interns.info(MOB_FILE_CACHE_MISS_COUNT, 
MOB_FILE_CACHE_MISS_COUNT_DESC), - rsWrap.getMobFileCacheMissCount()) - .addCounter( - Interns.info(MOB_FILE_CACHE_EVICTED_COUNT, MOB_FILE_CACHE_EVICTED_COUNT_DESC), - rsWrap.getMobFileCacheEvictedCount()) - .addGauge(Interns.info(MOB_FILE_CACHE_HIT_PERCENT, MOB_FILE_CACHE_HIT_PERCENT_DESC), - rsWrap.getMobFileCacheHitPercent()) - - .addCounter(Interns.info(HEDGED_READS, HEDGED_READS_DESC), rsWrap.getHedgedReadOps()) - .addCounter(Interns.info(HEDGED_READ_WINS, HEDGED_READ_WINS_DESC), - rsWrap.getHedgedReadWins()) - - .addCounter(Interns.info(BLOCKED_REQUESTS_COUNT, BLOCKED_REQUESTS_COUNT_DESC), - rsWrap.getBlockedRequestsCount()) - - .tag(Interns.info(ZOOKEEPER_QUORUM_NAME, ZOOKEEPER_QUORUM_DESC), - rsWrap.getZookeeperQuorum()) - .tag(Interns.info(SERVER_NAME_NAME, SERVER_NAME_DESC), rsWrap.getServerName()) - .tag(Interns.info(CLUSTER_ID_NAME, CLUSTER_ID_DESC), rsWrap.getClusterId()); - } - - metricsRegistry.snapshot(mrb, all); - } -} diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java deleted file mode 100644 index 1df72d5..0000000 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java +++ /dev/null @@ -1,259 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.regionserver; - -import java.util.concurrent.atomic.AtomicBoolean; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.metrics2.MetricsRecordBuilder; -import org.apache.hadoop.metrics2.lib.DynamicMetricsRegistry; -import org.apache.hadoop.metrics2.lib.Interns; -import org.apache.hadoop.metrics2.lib.MutableCounterLong; -import org.apache.hadoop.metrics2.lib.MutableHistogram; - -@InterfaceAudience.Private -public class MetricsRegionSourceImpl implements MetricsRegionSource { - - private static final Log LOG = LogFactory.getLog(MetricsRegionSourceImpl.class); - - private AtomicBoolean closed = new AtomicBoolean(false); - - // Non-final so that we can null out the wrapper - // This is just paranoia. We really really don't want to - // leak a whole region by way of keeping the - // regionWrapper around too long. 
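For reference on the getMetrics(MetricsCollector, boolean) shape used above: a Hadoop metrics2 source never returns values, it pushes them into the collector it is handed. A minimal self-contained sketch against the public metrics2 API; the class name and metric names are illustrative, not taken from this patch.

import org.apache.hadoop.metrics2.MetricsCollector;
import org.apache.hadoop.metrics2.MetricsSource;
import org.apache.hadoop.metrics2.lib.Interns;

// Push-style source: getMetrics() writes gauges and counters into the
// supplied collector instead of returning anything; 'all' asks for every
// metric, changed or not.
public class ExampleSource implements MetricsSource {
  private volatile long onlineRegions; // illustrative backing value

  @Override
  public void getMetrics(MetricsCollector collector, boolean all) {
    collector.addRecord("ExampleServer")
        .addGauge(Interns.info("regionCount", "Number of online regions"), onlineRegions)
        .addCounter(Interns.info("totalRequestCount", "Total requests received"), 0L);
  }
}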
- private MetricsRegionWrapper regionWrapper; - - private final MetricsRegionAggregateSourceImpl agg; - private final DynamicMetricsRegistry registry; - - private final String regionNamePrefix; - private final String regionPutKey; - private final String regionDeleteKey; - private final String regionGetKey; - private final String regionIncrementKey; - private final String regionAppendKey; - private final String regionScanNextKey; - - private final MutableCounterLong regionPut; - private final MutableCounterLong regionDelete; - private final MutableCounterLong regionIncrement; - private final MutableCounterLong regionAppend; - private final MutableHistogram regionGet; - private final MutableHistogram regionScanNext; - private final int hashCode; - - public MetricsRegionSourceImpl(MetricsRegionWrapper regionWrapper, - MetricsRegionAggregateSourceImpl aggregate) { - this.regionWrapper = regionWrapper; - agg = aggregate; - agg.register(this); - - LOG.debug("Creating new MetricsRegionSourceImpl for table " + - regionWrapper.getTableName() + " " + regionWrapper.getRegionName()); - - registry = agg.getMetricsRegistry(); - - regionNamePrefix = "Namespace_" + regionWrapper.getNamespace() + - "_table_" + regionWrapper.getTableName() + - "_region_" + regionWrapper.getRegionName() + - "_metric_"; - - String suffix = "Count"; - - regionPutKey = regionNamePrefix + MetricsRegionServerSource.MUTATE_KEY + suffix; - regionPut = registry.getLongCounter(regionPutKey, 0L); - - regionDeleteKey = regionNamePrefix + MetricsRegionServerSource.DELETE_KEY + suffix; - regionDelete = registry.getLongCounter(regionDeleteKey, 0L); - - regionIncrementKey = regionNamePrefix + MetricsRegionServerSource.INCREMENT_KEY + suffix; - regionIncrement = registry.getLongCounter(regionIncrementKey, 0L); - - regionAppendKey = regionNamePrefix + MetricsRegionServerSource.APPEND_KEY + suffix; - regionAppend = registry.getLongCounter(regionAppendKey, 0L); - - regionGetKey = regionNamePrefix + MetricsRegionServerSource.GET_KEY; - regionGet = registry.newTimeHistogram(regionGetKey); - - regionScanNextKey = regionNamePrefix + MetricsRegionServerSource.SCAN_NEXT_KEY; - regionScanNext = registry.newTimeHistogram(regionScanNextKey); - - hashCode = regionWrapper.getRegionHashCode(); - } - - @Override - public void close() { - boolean wasClosed = closed.getAndSet(true); - - // Has someone else already closed this for us? - if (wasClosed) { - return; - } - - // Before removing the metrics remove this region from the aggregate region bean. - // This should mean that it's unlikely that snapshot and close happen at the same time. - agg.deregister(this); - - // While it's un-likely that snapshot and close happen at the same time it's still possible. 
- // So grab the lock to ensure that all calls to snapshot are done before we remove the metrics - synchronized (this) { - if (LOG.isTraceEnabled()) { - LOG.trace("Removing region Metrics: " + regionWrapper.getRegionName()); - } - - registry.removeMetric(regionPutKey); - registry.removeMetric(regionDeleteKey); - registry.removeMetric(regionIncrementKey); - registry.removeMetric(regionAppendKey); - registry.removeMetric(regionGetKey); - registry.removeMetric(regionScanNextKey); - registry.removeHistogramMetrics(regionGetKey); - registry.removeHistogramMetrics(regionScanNextKey); - - regionWrapper = null; - } - } - - @Override - public void updatePut() { - regionPut.incr(); - } - - @Override - public void updateDelete() { - regionDelete.incr(); - } - - @Override - public void updateGet(long getSize) { - regionGet.add(getSize); - } - - @Override - public void updateScan(long scanSize) { - regionScanNext.add(scanSize); - } - - @Override - public void updateIncrement() { - regionIncrement.incr(); - } - - @Override - public void updateAppend() { - regionAppend.incr(); - } - - @Override - public MetricsRegionAggregateSource getAggregateSource() { - return agg; - } - - @Override - public int compareTo(MetricsRegionSource source) { - if (!(source instanceof MetricsRegionSourceImpl)) { - return -1; - } - - MetricsRegionSourceImpl impl = (MetricsRegionSourceImpl) source; - if (impl == null) { - return -1; - } - - return Long.compare(hashCode, impl.hashCode); - } - - void snapshot(MetricsRecordBuilder mrb, boolean ignored) { - - // If there is a close that started be double extra sure - // that we're not getting any locks and not putting data - // into the metrics that should be removed. So early out - // before even getting the lock. - if (closed.get()) { - return; - } - - // Grab the read - // This ensures that removes of the metrics - // can't happen while we are putting them back in. - synchronized (this) { - - // It's possible that a close happened between checking - // the closed variable and getting the lock. 
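The close()/snapshot() interplay above is a double-check discipline: a lock-free read of the closed flag as a fast path, then a re-check under the lock. A sketch of just that discipline, with the metric bookkeeping elided:

import java.util.concurrent.atomic.AtomicBoolean;

// close() and snapshot() race; the flag plus the lock guarantees a
// snapshot never touches state that close() has already torn down.
class ClosableSnapshot {
  private final AtomicBoolean closed = new AtomicBoolean(false);

  void close() {
    if (closed.getAndSet(true)) {
      return; // someone else already closed us
    }
    synchronized (this) {
      // remove metrics, null out references, etc.
    }
  }

  synchronized void snapshot() {
    if (closed.get()) {
      return; // close won the race; emit nothing
    }
    // emit metrics while holding the lock
  }
}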
- if (closed.get()) { - return; - } - - mrb.addGauge( - Interns.info( - regionNamePrefix + MetricsRegionServerSource.STORE_COUNT, - MetricsRegionServerSource.STORE_COUNT_DESC), - this.regionWrapper.getNumStores()); - mrb.addGauge(Interns.info( - regionNamePrefix + MetricsRegionServerSource.STOREFILE_COUNT, - MetricsRegionServerSource.STOREFILE_COUNT_DESC), - this.regionWrapper.getNumStoreFiles()); - mrb.addGauge(Interns.info( - regionNamePrefix + MetricsRegionServerSource.MEMSTORE_SIZE, - MetricsRegionServerSource.MEMSTORE_SIZE_DESC), - this.regionWrapper.getMemstoreSize()); - mrb.addGauge(Interns.info( - regionNamePrefix + MetricsRegionServerSource.STOREFILE_SIZE, - MetricsRegionServerSource.STOREFILE_SIZE_DESC), - this.regionWrapper.getStoreFileSize()); - mrb.addCounter(Interns.info( - regionNamePrefix + MetricsRegionSource.COMPACTIONS_COMPLETED_COUNT, - MetricsRegionSource.COMPACTIONS_COMPLETED_DESC), - this.regionWrapper.getNumCompactionsCompleted()); - mrb.addCounter(Interns.info( - regionNamePrefix + MetricsRegionSource.NUM_BYTES_COMPACTED_COUNT, - MetricsRegionSource.NUM_BYTES_COMPACTED_DESC), - this.regionWrapper.getNumBytesCompacted()); - mrb.addCounter(Interns.info( - regionNamePrefix + MetricsRegionSource.NUM_FILES_COMPACTED_COUNT, - MetricsRegionSource.NUM_FILES_COMPACTED_DESC), - this.regionWrapper.getNumFilesCompacted()); - mrb.addCounter(Interns.info( - regionNamePrefix + MetricsRegionServerSource.READ_REQUEST_COUNT, - MetricsRegionServerSource.READ_REQUEST_COUNT_DESC), - this.regionWrapper.getReadRequestCount()); - mrb.addCounter(Interns.info( - regionNamePrefix + MetricsRegionServerSource.WRITE_REQUEST_COUNT, - MetricsRegionServerSource.WRITE_REQUEST_COUNT_DESC), - this.regionWrapper.getWriteRequestCount()); - mrb.addCounter(Interns.info(regionNamePrefix + MetricsRegionSource.REPLICA_ID, - MetricsRegionSource.REPLICA_ID_DESC), - this.regionWrapper.getReplicaId()); - } - } - - @Override - public int hashCode() { - return hashCode; - } - - @Override - public boolean equals(Object obj) { - return obj == this || - (obj instanceof MetricsRegionSourceImpl && compareTo((MetricsRegionSourceImpl) obj) == 0); - } -} diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsEditsReplaySourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsEditsReplaySourceImpl.java deleted file mode 100644 index 787fe76..0000000 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsEditsReplaySourceImpl.java +++ /dev/null @@ -1,76 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.hbase.regionserver.wal; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.metrics.BaseSourceImpl; -import org.apache.hadoop.metrics2.MetricHistogram; - -/** - * Hadoop2 implementation of MetricsEditsReplaySource. Implements BaseSource through BaseSourceImpl, - * following the pattern - */ -@InterfaceAudience.Private -public class MetricsEditsReplaySourceImpl extends BaseSourceImpl implements - MetricsEditsReplaySource { - - private static final Log LOG = LogFactory.getLog(MetricsEditsReplaySourceImpl.class.getName()); - - private MetricHistogram replayTimeHisto; - private MetricHistogram replayBatchSizeHisto; - private MetricHistogram replayDataSizeHisto; - - public MetricsEditsReplaySourceImpl() { - this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT); - } - - public MetricsEditsReplaySourceImpl(String metricsName, - String metricsDescription, - String metricsContext, - String metricsJmxContext) { - super(metricsName, metricsDescription, metricsContext, metricsJmxContext); - } - - @Override - public void init() { - super.init(); - replayTimeHisto = metricsRegistry.newTimeHistogram(REPLAY_TIME_NAME, REPLAY_TIME_DESC); - replayBatchSizeHisto = metricsRegistry.newSizeHistogram(REPLAY_BATCH_SIZE_NAME, - REPLAY_BATCH_SIZE_DESC); - replayDataSizeHisto = metricsRegistry - .newSizeHistogram(REPLAY_DATA_SIZE_NAME, REPLAY_DATA_SIZE_DESC); - } - - @Override - public void updateReplayTime(long time) { - replayTimeHisto.add(time); - } - - @Override - public void updateReplayBatchSize(long size) { - replayBatchSizeHisto.add(size); - } - - @Override - public void updateReplayDataSize(long size) { - replayDataSizeHisto.add(size); - } -} diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWALSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWALSourceImpl.java deleted file mode 100644 index a149d1b..0000000 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWALSourceImpl.java +++ /dev/null @@ -1,106 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.regionserver.wal; - -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.metrics.BaseSourceImpl; -import org.apache.hadoop.metrics2.MetricHistogram; -import org.apache.hadoop.metrics2.lib.MutableCounterLong; - - -/** - * Class that transitions metrics from MetricsWAL into the metrics subsystem. - * - * Implements BaseSource through BaseSourceImpl, following the pattern.
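Note that the replay source just shown creates its histograms in init() rather than in the constructor, so construction stays cheap until the source is wired into the metrics system. A hedged usage sketch; the call site and all values are illustrative:

// Hypothetical caller feeding the replay histograms registered in init().
public final class ReplayMetricsDemo {
  public static void main(String[] args) {
    MetricsEditsReplaySourceImpl source = new MetricsEditsReplaySourceImpl();
    source.init(); // the histograms exist only after this call

    long start = System.nanoTime();
    // ... replay one batch of WAL edits ...
    long elapsedMs = (System.nanoTime() - start) / 1000000L;
    source.updateReplayTime(elapsedMs);
    source.updateReplayBatchSize(42);  // edits in the batch (illustrative)
    source.updateReplayDataSize(8192); // bytes replayed (illustrative)
  }
}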
- * @see org.apache.hadoop.hbase.regionserver.wal.MetricsWAL - */ -@InterfaceAudience.Private -public class MetricsWALSourceImpl extends BaseSourceImpl implements MetricsWALSource { - - private final MetricHistogram appendSizeHisto; - private final MetricHistogram appendTimeHisto; - private final MetricHistogram syncTimeHisto; - private final MutableCounterLong appendCount; - private final MutableCounterLong slowAppendCount; - private final MutableCounterLong logRollRequested; - private final MutableCounterLong lowReplicationLogRollRequested; - - public MetricsWALSourceImpl() { - this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT); - } - - public MetricsWALSourceImpl(String metricsName, - String metricsDescription, - String metricsContext, - String metricsJmxContext) { - super(metricsName, metricsDescription, metricsContext, metricsJmxContext); - - //Create and store the metrics that will be used. - appendTimeHisto = this.getMetricsRegistry().newTimeHistogram(APPEND_TIME, APPEND_TIME_DESC); - appendSizeHisto = this.getMetricsRegistry().newSizeHistogram(APPEND_SIZE, APPEND_SIZE_DESC); - appendCount = this.getMetricsRegistry().newCounter(APPEND_COUNT, APPEND_COUNT_DESC, 0l); - slowAppendCount = - this.getMetricsRegistry().newCounter(SLOW_APPEND_COUNT, SLOW_APPEND_COUNT_DESC, 0l); - syncTimeHisto = this.getMetricsRegistry().newTimeHistogram(SYNC_TIME, SYNC_TIME_DESC); - logRollRequested = - this.getMetricsRegistry().newCounter(ROLL_REQUESTED, ROLL_REQUESTED_DESC, 0L); - lowReplicationLogRollRequested = this.getMetricsRegistry() - .newCounter(LOW_REPLICA_ROLL_REQUESTED, LOW_REPLICA_ROLL_REQUESTED_DESC, 0L); - } - - @Override - public void incrementAppendSize(long size) { - appendSizeHisto.add(size); - } - - @Override - public void incrementAppendTime(long time) { - appendTimeHisto.add(time); - } - - @Override - public void incrementAppendCount() { - appendCount.incr(); - } - - @Override - public void incrementSlowAppendCount() { - slowAppendCount.incr(); - } - - @Override - public void incrementSyncTime(long time) { - syncTimeHisto.add(time); - } - - @Override - public void incrementLogRollRequested() { - logRollRequested.incr(); - } - - @Override - public void incrementLowReplicationLogRoll() { - lowReplicationLogRollRequested.incr(); - } - - @Override - public long getSlowAppendCount() { - return slowAppendCount.value(); - } -} diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationGlobalSourceSource.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationGlobalSourceSource.java deleted file mode 100644 index 392cd39..0000000 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationGlobalSourceSource.java +++ /dev/null @@ -1,124 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
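By contrast, MetricsWALSourceImpl above creates every counter and histogram eagerly in its constructor. A sketch of the kind of write path that would drive them; the threshold is illustrative and not defined anywhere in this patch:

// Hypothetical append path: every append feeds the latency histogram and
// the append counter; only appends over some threshold bump the slow counter.
void recordAppend(MetricsWALSource source, long tookMs, long sizeBytes) {
  source.incrementAppendTime(tookMs);
  source.incrementAppendSize(sizeBytes);
  source.incrementAppendCount();
  if (tookMs > 100) { // illustrative threshold, in milliseconds
    source.incrementSlowAppendCount();
  }
}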
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.replication.regionserver; - -import org.apache.hadoop.metrics2.lib.MutableCounterLong; -import org.apache.hadoop.metrics2.lib.MutableGaugeLong; - -public class MetricsReplicationGlobalSourceSource implements MetricsReplicationSourceSource{ - private final MetricsReplicationSourceImpl rms; - - private final MutableGaugeLong ageOfLastShippedOpGauge; - private final MutableGaugeLong sizeOfLogQueueGauge; - private final MutableCounterLong logReadInEditsCounter; - private final MutableCounterLong logEditsFilteredCounter; - private final MutableCounterLong shippedBatchesCounter; - private final MutableCounterLong shippedOpsCounter; - private final MutableCounterLong shippedKBsCounter; - private final MutableCounterLong logReadInBytesCounter; - private final MutableCounterLong shippedHFilesCounter; - private final MutableGaugeLong sizeOfHFileRefsQueueGauge; - - public MetricsReplicationGlobalSourceSource(MetricsReplicationSourceImpl rms) { - this.rms = rms; - - ageOfLastShippedOpGauge = rms.getMetricsRegistry().getLongGauge(SOURCE_AGE_OF_LAST_SHIPPED_OP, 0L); - - sizeOfLogQueueGauge = rms.getMetricsRegistry().getLongGauge(SOURCE_SIZE_OF_LOG_QUEUE, 0L); - - shippedBatchesCounter = rms.getMetricsRegistry().getLongCounter(SOURCE_SHIPPED_BATCHES, 0L); - - shippedOpsCounter = rms.getMetricsRegistry().getLongCounter(SOURCE_SHIPPED_OPS, 0L); - - shippedKBsCounter = rms.getMetricsRegistry().getLongCounter(SOURCE_SHIPPED_KBS, 0L); - - logReadInBytesCounter = rms.getMetricsRegistry().getLongCounter(SOURCE_LOG_READ_IN_BYTES, 0L); - - logReadInEditsCounter = rms.getMetricsRegistry().getLongCounter(SOURCE_LOG_READ_IN_EDITS, 0L); - - logEditsFilteredCounter = rms.getMetricsRegistry().getLongCounter(SOURCE_LOG_EDITS_FILTERED, 0L); - - shippedHFilesCounter = rms.getMetricsRegistry().getLongCounter(SOURCE_SHIPPED_HFILES, 0L); - - sizeOfHFileRefsQueueGauge = - rms.getMetricsRegistry().getLongGauge(SOURCE_SIZE_OF_HFILE_REFS_QUEUE, 0L); - } - - @Override public void setLastShippedAge(long age) { - ageOfLastShippedOpGauge.set(age); - } - - @Override public void setSizeOfLogQueue(int size) { - sizeOfLogQueueGauge.set(size); - } - - @Override public void incrSizeOfLogQueue(int size) { - sizeOfLogQueueGauge.incr(size); - } - - @Override public void decrSizeOfLogQueue(int size) { - sizeOfLogQueueGauge.decr(size); - } - - @Override public void incrLogReadInEdits(long size) { - logReadInEditsCounter.incr(size); - } - - @Override public void incrLogEditsFiltered(long size) { - logEditsFilteredCounter.incr(size); - } - - @Override public void incrBatchesShipped(int batches) { - shippedBatchesCounter.incr(batches); - } - - @Override public void incrOpsShipped(long ops) { - shippedOpsCounter.incr(ops); - } - - @Override public void incrShippedKBs(long size) { - shippedKBsCounter.incr(size); - } - - @Override public void incrLogReadInBytes(long size) { - logReadInBytesCounter.incr(size); - } - - @Override public void clear() { - } - - @Override - public long getLastShippedAge() { - return ageOfLastShippedOpGauge.value(); - } - - @Override public void incrHFilesShipped(long 
hfiles) { - shippedHFilesCounter.incr(hfiles); - } - - @Override - public void incrSizeOfHFileRefsQueue(long size) { - sizeOfHFileRefsQueueGauge.incr(size); - } - - @Override - public void decrSizeOfHFileRefsQueue(long size) { - sizeOfHFileRefsQueueGauge.decr(size); - } -} diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSinkSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSinkSourceImpl.java deleted file mode 100644 index 8f4a337..0000000 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSinkSourceImpl.java +++ /dev/null @@ -1,59 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.replication.regionserver; - -import org.apache.hadoop.metrics2.lib.MutableCounterLong; -import org.apache.hadoop.metrics2.lib.MutableGaugeLong; - -public class MetricsReplicationSinkSourceImpl implements MetricsReplicationSinkSource { - - private final MutableGaugeLong ageGauge; - private final MutableCounterLong batchesCounter; - private final MutableCounterLong opsCounter; - private final MutableCounterLong hfilesCounter; - - public MetricsReplicationSinkSourceImpl(MetricsReplicationSourceImpl rms) { - ageGauge = rms.getMetricsRegistry().getLongGauge(SINK_AGE_OF_LAST_APPLIED_OP, 0L); - batchesCounter = rms.getMetricsRegistry().getLongCounter(SINK_APPLIED_BATCHES, 0L); - opsCounter = rms.getMetricsRegistry().getLongCounter(SINK_APPLIED_OPS, 0L); - hfilesCounter = rms.getMetricsRegistry().getLongCounter(SINK_APPLIED_HFILES, 0L); - } - - @Override public void setLastAppliedOpAge(long age) { - ageGauge.set(age); - } - - @Override public void incrAppliedBatches(long batches) { - batchesCounter.incr(batches); - } - - @Override public void incrAppliedOps(long batchsize) { - opsCounter.incr(batchsize); - } - - @Override - public long getLastAppliedOpAge() { - return ageGauge.value(); - } - - @Override - public void incrAppliedHFiles(long hfiles) { - hfilesCounter.incr(hfiles); - } -} diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceFactoryImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceFactoryImpl.java deleted file mode 100644 index b07790f..0000000 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceFactoryImpl.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.replication.regionserver; - -public class MetricsReplicationSourceFactoryImpl implements MetricsReplicationSourceFactory { - - private static enum SourceHolder { - INSTANCE; - final MetricsReplicationSourceImpl source = new MetricsReplicationSourceImpl(); - } - - @Override public MetricsReplicationSinkSource getSink() { - return new MetricsReplicationSinkSourceImpl(SourceHolder.INSTANCE.source); - } - - @Override public MetricsReplicationSourceSource getSource(String id) { - return new MetricsReplicationSourceSourceImpl(SourceHolder.INSTANCE.source, id); - } - - @Override public MetricsReplicationSourceSource getGlobalSource() { - return new MetricsReplicationGlobalSourceSource(SourceHolder.INSTANCE.source); - } -} diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceImpl.java deleted file mode 100644 index f3f4d38..0000000 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceImpl.java +++ /dev/null @@ -1,45 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.replication.regionserver; - -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.metrics.BaseSourceImpl; - -/** - * Hadoop2 implementation of MetricsReplicationSource. This provides access to metrics gauges and - * counters. 
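The factory just shown leans on a single-element enum to hold the one shared MetricsReplicationSourceImpl. The idiom in isolation, with a placeholder payload type; nothing below comes from the patch itself:

// Enum singleton: the JVM guarantees INSTANCE is created exactly once,
// lazily on first use, with safe publication and no explicit locking.
final class ExpensivePayload {}

enum Holder {
  INSTANCE;
  final ExpensivePayload payload = new ExpensivePayload();
}

// Every caller shares the same object:
//   ExpensivePayload p = Holder.INSTANCE.payload;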
- * - * Implements BaseSource through BaseSourceImpl, following the pattern - */ -@InterfaceAudience.Private -public class MetricsReplicationSourceImpl extends BaseSourceImpl implements - MetricsReplicationSource { - - - public MetricsReplicationSourceImpl() { - this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT); - } - - MetricsReplicationSourceImpl(String metricsName, - String metricsDescription, - String metricsContext, - String metricsJmxContext) { - super(metricsName, metricsDescription, metricsContext, metricsJmxContext); - } -} diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceSourceImpl.java deleted file mode 100644 index 217cc3e..0000000 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceSourceImpl.java +++ /dev/null @@ -1,161 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.replication.regionserver; - -import org.apache.hadoop.metrics2.lib.MutableCounterLong; -import org.apache.hadoop.metrics2.lib.MutableGaugeLong; - -public class MetricsReplicationSourceSourceImpl implements MetricsReplicationSourceSource { - - private final MetricsReplicationSourceImpl rms; - private final String id; - private final String sizeOfLogQueueKey; - private final String ageOfLastShippedOpKey; - private final String logReadInEditsKey; - private final String logEditsFilteredKey; - private final String shippedBatchesKey; - private final String shippedOpsKey; - private final String shippedKBsKey; - private final String logReadInBytesKey; - private final String shippedHFilesKey; - private final String sizeOfHFileRefsQueueKey; - - private final MutableGaugeLong ageOfLastShippedOpGauge; - private final MutableGaugeLong sizeOfLogQueueGauge; - private final MutableCounterLong logReadInEditsCounter; - private final MutableCounterLong logEditsFilteredCounter; - private final MutableCounterLong shippedBatchesCounter; - private final MutableCounterLong shippedOpsCounter; - private final MutableCounterLong shippedKBsCounter; - private final MutableCounterLong logReadInBytesCounter; - private final MutableCounterLong shippedHFilesCounter; - private final MutableGaugeLong sizeOfHFileRefsQueueGauge; - - public MetricsReplicationSourceSourceImpl(MetricsReplicationSourceImpl rms, String id) { - this.rms = rms; - this.id = id; - - ageOfLastShippedOpKey = "source." + id + ".ageOfLastShippedOp"; - ageOfLastShippedOpGauge = rms.getMetricsRegistry().getLongGauge(ageOfLastShippedOpKey, 0L); - - sizeOfLogQueueKey = "source." 
+ id + ".sizeOfLogQueue"; - sizeOfLogQueueGauge = rms.getMetricsRegistry().getLongGauge(sizeOfLogQueueKey, 0L); - - shippedBatchesKey = "source." + this.id + ".shippedBatches"; - shippedBatchesCounter = rms.getMetricsRegistry().getLongCounter(shippedBatchesKey, 0L); - - shippedOpsKey = "source." + this.id + ".shippedOps"; - shippedOpsCounter = rms.getMetricsRegistry().getLongCounter(shippedOpsKey, 0L); - - shippedKBsKey = "source." + this.id + ".shippedKBs"; - shippedKBsCounter = rms.getMetricsRegistry().getLongCounter(shippedKBsKey, 0L); - - logReadInBytesKey = "source." + this.id + ".logReadInBytes"; - logReadInBytesCounter = rms.getMetricsRegistry().getLongCounter(logReadInBytesKey, 0L); - - logReadInEditsKey = "source." + id + ".logEditsRead"; - logReadInEditsCounter = rms.getMetricsRegistry().getLongCounter(logReadInEditsKey, 0L); - - logEditsFilteredKey = "source." + id + ".logEditsFiltered"; - logEditsFilteredCounter = rms.getMetricsRegistry().getLongCounter(logEditsFilteredKey, 0L); - - shippedHFilesKey = "source." + this.id + ".shippedHFiles"; - shippedHFilesCounter = rms.getMetricsRegistry().getLongCounter(shippedHFilesKey, 0L); - - sizeOfHFileRefsQueueKey = "source." + id + ".sizeOfHFileRefsQueue"; - sizeOfHFileRefsQueueGauge = rms.getMetricsRegistry().getLongGauge(sizeOfHFileRefsQueueKey, 0L); - } - - @Override public void setLastShippedAge(long age) { - ageOfLastShippedOpGauge.set(age); - } - - @Override public void setSizeOfLogQueue(int size) { - sizeOfLogQueueGauge.set(size); - } - - @Override public void incrSizeOfLogQueue(int size) { - sizeOfLogQueueGauge.incr(size); - } - - @Override public void decrSizeOfLogQueue(int size) { - sizeOfLogQueueGauge.decr(size); - } - - @Override public void incrLogReadInEdits(long size) { - logReadInEditsCounter.incr(size); - } - - @Override public void incrLogEditsFiltered(long size) { - logEditsFilteredCounter.incr(size); - } - - @Override public void incrBatchesShipped(int batches) { - shippedBatchesCounter.incr(batches); - } - - @Override public void incrOpsShipped(long ops) { - shippedOpsCounter.incr(ops); - } - - @Override public void incrShippedKBs(long size) { - shippedKBsCounter.incr(size); - } - - @Override public void incrLogReadInBytes(long size) { - logReadInBytesCounter.incr(size); - } - - @Override public void clear() { - rms.removeMetric(ageOfLastShippedOpKey); - - rms.removeMetric(sizeOfLogQueueKey); - - rms.removeMetric(shippedBatchesKey); - rms.removeMetric(shippedOpsKey); - rms.removeMetric(shippedKBsKey); - - rms.removeMetric(logReadInBytesKey); - rms.removeMetric(logReadInEditsKey); - - rms.removeMetric(logEditsFilteredKey); - - rms.removeMetric(shippedHFilesKey); - rms.removeMetric(sizeOfHFileRefsQueueKey); - } - - @Override - public long getLastShippedAge() { - return ageOfLastShippedOpGauge.value(); - } - - @Override - public void incrHFilesShipped(long hfiles) { - shippedHFilesCounter.incr(hfiles); - } - - @Override - public void incrSizeOfHFileRefsQueue(long size) { - sizeOfHFileRefsQueueGauge.incr(size); - } - - @Override - public void decrSizeOfHFileRefsQueue(long size) { - sizeOfHFileRefsQueueGauge.decr(size); - } -} diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/rest/MetricsRESTSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/rest/MetricsRESTSourceImpl.java deleted file mode 100644 index 9eae18b..0000000 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/rest/MetricsRESTSourceImpl.java +++ /dev/null @@ -1,115 +0,0 @@ -/** - * Licensed to the 
Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.rest; - -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.metrics.BaseSourceImpl; -import org.apache.hadoop.metrics2.lib.MutableCounterLong; - -/** - * Hadoop Two implementation of a metrics2 source that will export metrics from the Rest server to - * the hadoop metrics2 subsystem. - * - * Implements BaseSource through BaseSourceImpl, following the pattern - */ -@InterfaceAudience.Private -public class MetricsRESTSourceImpl extends BaseSourceImpl implements MetricsRESTSource { - - private MutableCounterLong request; - private MutableCounterLong sucGet; - private MutableCounterLong sucPut; - private MutableCounterLong sucDel; - private MutableCounterLong sucScan; - private MutableCounterLong fGet; - private MutableCounterLong fPut; - private MutableCounterLong fDel; - private MutableCounterLong fScan; - - public MetricsRESTSourceImpl() { - this(METRICS_NAME, METRICS_DESCRIPTION, CONTEXT, JMX_CONTEXT); - } - - public MetricsRESTSourceImpl(String metricsName, - String metricsDescription, - String metricsContext, - String metricsJmxContext) { - super(metricsName, metricsDescription, metricsContext, metricsJmxContext); - } - - @Override - public void init() { - super.init(); - request = getMetricsRegistry().getLongCounter(REQUEST_KEY, 0l); - - sucGet = getMetricsRegistry().getLongCounter(SUCCESSFUL_GET_KEY, 0l); - sucPut = getMetricsRegistry().getLongCounter(SUCCESSFUL_PUT_KEY, 0l); - sucDel = getMetricsRegistry().getLongCounter(SUCCESSFUL_DELETE_KEY, 0l); - sucScan = getMetricsRegistry().getLongCounter(SUCCESSFUL_SCAN_KEY, 0L); - - fGet = getMetricsRegistry().getLongCounter(FAILED_GET_KEY, 0l); - fPut = getMetricsRegistry().getLongCounter(FAILED_PUT_KEY, 0l); - fDel = getMetricsRegistry().getLongCounter(FAILED_DELETE_KEY, 0l); - fScan = getMetricsRegistry().getLongCounter(FAILED_SCAN_KEY, 0l); - } - - @Override - public void incrementRequests(int inc) { - request.incr(inc); - } - - @Override - public void incrementSucessfulGetRequests(int inc) { - sucGet.incr(inc); - } - - @Override - public void incrementSucessfulPutRequests(int inc) { - sucPut.incr(inc); - } - - @Override - public void incrementSucessfulDeleteRequests(int inc) { - sucDel.incr(inc); - } - - @Override - public void incrementFailedGetRequests(int inc) { - fGet.incr(inc); - } - - @Override - public void incrementFailedPutRequests(int inc) { - fPut.incr(inc); - } - - @Override - public void incrementFailedDeleteRequests(int inc) { - fDel.incr(inc); - } - - @Override - public void incrementSucessfulScanRequests(int inc) { - sucScan.incr(inc); - } - - @Override - public void incrementFailedScanRequests(int inc) { - fScan.incr(inc); - } -} diff --git 
a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceFactoryImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceFactoryImpl.java deleted file mode 100644 index f6ad6da..0000000 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceFactoryImpl.java +++ /dev/null @@ -1,54 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.thrift; - -import org.apache.hadoop.hbase.classification.InterfaceAudience; - -/** - * Class used to create metrics sources for Thrift and Thrift2 servers. - */ -@InterfaceAudience.Private -public class MetricsThriftServerSourceFactoryImpl implements MetricsThriftServerSourceFactory { - - /** - * A singleton used to make sure that only one thrift metrics source per server type is ever - * created. - */ - private static enum FactoryStorage { - INSTANCE; - MetricsThriftServerSourceImpl thriftOne = new MetricsThriftServerSourceImpl(METRICS_NAME, - METRICS_DESCRIPTION, - THRIFT_ONE_METRICS_CONTEXT, - THRIFT_ONE_JMX_CONTEXT); - MetricsThriftServerSourceImpl thriftTwo = new MetricsThriftServerSourceImpl(METRICS_NAME, - METRICS_DESCRIPTION, - THRIFT_TWO_METRICS_CONTEXT, - THRIFT_TWO_JMX_CONTEXT); - } - - @Override - public MetricsThriftServerSource createThriftOneSource() { - return FactoryStorage.INSTANCE.thriftOne; - } - - @Override - public MetricsThriftServerSource createThriftTwoSource() { - return FactoryStorage.INSTANCE.thriftTwo; - } -} diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceImpl.java deleted file mode 100644 index f9612e5..0000000 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceImpl.java +++ /dev/null @@ -1,99 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.hbase.thrift; - -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.metrics.BaseSourceImpl; -import org.apache.hadoop.metrics2.lib.MutableGaugeLong; -import org.apache.hadoop.metrics2.lib.MutableHistogram; - -/** - * Hadoop 2 version of MetricsThriftServerSource{@link org.apache.hadoop.hbase.thrift.MetricsThriftServerSource} - * - * Implements BaseSource through BaseSourceImpl, following the pattern - */ -@InterfaceAudience.Private -public class MetricsThriftServerSourceImpl extends BaseSourceImpl implements - MetricsThriftServerSource { - - private MutableHistogram batchGetStat; - private MutableHistogram batchMutateStat; - private MutableHistogram queueTimeStat; - - private MutableHistogram thriftCallStat; - private MutableHistogram thriftSlowCallStat; - - private MutableGaugeLong callQueueLenGauge; - - public MetricsThriftServerSourceImpl(String metricsName, - String metricsDescription, - String metricsContext, - String metricsJmxContext) { - super(metricsName, metricsDescription, metricsContext, metricsJmxContext); - } - - @Override - public void init() { - super.init(); - batchGetStat = getMetricsRegistry().newTimeHistogram(BATCH_GET_KEY); - batchMutateStat = getMetricsRegistry().newTimeHistogram(BATCH_MUTATE_KEY); - queueTimeStat = getMetricsRegistry().newTimeHistogram(TIME_IN_QUEUE_KEY); - thriftCallStat = getMetricsRegistry().newTimeHistogram(THRIFT_CALL_KEY); - thriftSlowCallStat = getMetricsRegistry().newTimeHistogram(SLOW_THRIFT_CALL_KEY); - callQueueLenGauge = getMetricsRegistry().getLongGauge(CALL_QUEUE_LEN_KEY, 0); - - } - - @Override - public void incTimeInQueue(long time) { - queueTimeStat.add(time); - } - - @Override - public void setCallQueueLen(int len) { - callQueueLenGauge.set(len); - } - - @Override - public void incNumRowKeysInBatchGet(int diff) { - batchGetStat.add(diff); - } - - @Override - public void incNumRowKeysInBatchMutate(int diff) { - batchMutateStat.add(diff); - } - - @Override - public void incMethodTime(String name, long time) { - MutableHistogram s = getMetricsRegistry().getHistogram(name); - s.add(time); - } - - @Override - public void incCall(long time) { - thriftCallStat.add(time); - } - - @Override - public void incSlowCall(long time) { - thriftSlowCallStat.add(time); - } - -} diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/impl/JmxCacheBuster.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/impl/JmxCacheBuster.java deleted file mode 100644 index 95734ba..0000000 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/impl/JmxCacheBuster.java +++ /dev/null @@ -1,86 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
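MetricsThriftServerSourceImpl above resolves a histogram by method name on each incMethodTime() call, so every Thrift method gets its own latency distribution without pre-registration. A usage sketch; the method name and the slow threshold are illustrative:

// Hypothetical wrapper timing one Thrift call.
void timeCall(MetricsThriftServerSource source, long tookMs) {
  source.incMethodTime("getRow", tookMs); // one histogram per method name
  source.incCall(tookMs);                 // aggregate across all methods
  if (tookMs > 1000) {                    // illustrative slow-call threshold
    source.incSlowCall(tookMs);
  }
}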
- */ -package org.apache.hadoop.metrics2.impl; - -import java.util.concurrent.ScheduledFuture; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicReference; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.metrics2.MetricsExecutor; -import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; -import org.apache.hadoop.metrics2.lib.MetricsExecutorImpl; - -/** - * JMX caches the beans that have been exported; even after the values are removed from hadoop's - * metrics system the keys and old values will still remain. This class stops and restarts the - * Hadoop metrics system, forcing JMX to clear the cache of exported metrics. - * - * This class needs to be in the o.a.h.metrics2.impl namespace as many of the variables/calls used - * are package private. - */ -@InterfaceAudience.Private -public class JmxCacheBuster { - private static final Log LOG = LogFactory.getLog(JmxCacheBuster.class); - private static AtomicReference fut = new AtomicReference<>(null); - private static MetricsExecutor executor = new MetricsExecutorImpl(); - - private JmxCacheBuster() { - // Static only cache. - } - - /** - * For JMX to forget about all previously exported metrics. - */ - public static void clearJmxCache() { - //If there are more than 100 ms before the executor will run then everything should be merged. - ScheduledFuture future = fut.get(); - if ((future != null && (!future.isDone() && future.getDelay(TimeUnit.MILLISECONDS) > 100))) { - // BAIL OUT - return; - } - future = executor.getExecutor().schedule(new JmxCacheBusterRunnable(), 5, TimeUnit.SECONDS); - fut.set(future); - } - - final static class JmxCacheBusterRunnable implements Runnable { - @Override - public void run() { - if (LOG.isTraceEnabled()) { - LOG.trace("Clearing JMX mbean cache."); - } - - // This is pretty extreme but it's the best way that - // I could find to get metrics to be removed. - try { - if (DefaultMetricsSystem.instance() != null) { - DefaultMetricsSystem.instance().stop(); - // Sleep some time so that the rest of the hadoop metrics - // system knows that things are done - Thread.sleep(500); - DefaultMetricsSystem.instance().start(); - } - } catch (Exception exception) { - LOG.debug("error clearing the jmx it appears the metrics system hasn't been started", - exception); - } - } - } -} diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/DefaultMetricsSystemHelper.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/DefaultMetricsSystemHelper.java deleted file mode 100644 index 832e220..0000000 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/DefaultMetricsSystemHelper.java +++ /dev/null @@ -1,55 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
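clearJmxCache() above is a debounce: at most one cache-busting run is kept pending, and a new one is scheduled only when nothing is queued or the queued run is about to fire. The same pattern in isolation, assuming nothing beyond java.util.concurrent:

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;

// Requests arriving while a run is already pending are merged into it.
class Debouncer {
  private final ScheduledExecutorService pool = Executors.newScheduledThreadPool(1);
  private final AtomicReference<ScheduledFuture<?>> pending = new AtomicReference<>();

  void request(Runnable task) {
    ScheduledFuture<?> f = pending.get();
    if (f != null && !f.isDone() && f.getDelay(TimeUnit.MILLISECONDS) > 100) {
      return; // a run is already queued far enough out; merge into it
    }
    pending.set(pool.schedule(task, 5, TimeUnit.SECONDS));
  }
}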
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.metrics2.lib; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; - -import java.lang.reflect.Method; - -public class DefaultMetricsSystemHelper { - - private static final Log LOG = LogFactory.getLog(DefaultMetricsSystemHelper.class); - private final Method removeObjectMethod; - - public DefaultMetricsSystemHelper() { - Method m; - try { - Class clazz = DefaultMetricsSystem.INSTANCE.getClass(); - m = clazz.getDeclaredMethod("removeObjectName", String.class); - m.setAccessible(true); - } catch (NoSuchMethodException e) { - m = null; - } - removeObjectMethod = m; - } - - public boolean removeObjectName(final String name) { - if (removeObjectMethod != null) { - try { - removeObjectMethod.invoke(DefaultMetricsSystem.INSTANCE, name); - return true; - } catch (Exception e) { - if (LOG.isTraceEnabled()) { - LOG.trace("Unable to remove object name from cache: " + name, e); - } - } - } - return false; - } -} diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/DynamicMetricsRegistry.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/DynamicMetricsRegistry.java deleted file mode 100644 index ee13c76..0000000 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/DynamicMetricsRegistry.java +++ /dev/null @@ -1,609 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.metrics2.lib; - -import java.util.Collection; -import java.util.concurrent.ConcurrentMap; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.metrics2.MetricsException; -import org.apache.hadoop.metrics2.MetricsInfo; -import org.apache.hadoop.metrics2.MetricsRecordBuilder; -import org.apache.hadoop.metrics2.MetricsTag; -import org.apache.hadoop.metrics2.impl.MsInfo; - -import com.google.common.base.Objects; -import com.google.common.collect.Maps; - -/** - * An optional metrics registry class for creating and maintaining a - * collection of MetricsMutables, making writing metrics source easier. - * NOTE: this is a copy of org.apache.hadoop.metrics2.lib.MetricsRegistry with added one - * feature: metrics can be removed. When HADOOP-8313 is fixed, usages of this class - * should be substituted with org.apache.hadoop.metrics2.lib.MetricsRegistry. - * This implementation also provides handy methods for creating metrics - * dynamically. - * Another difference is that metricsMap implementation is substituted with - * thread-safe map, as we allow dynamic metrics additions/removals. 
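DefaultMetricsSystemHelper above exists only to reach a private Hadoop method reflectively: look the Method up once, force accessibility, and degrade to a no-op when it is absent. The core idiom against a hypothetical target class:

import java.lang.reflect.Method;

// Reflective access to a private method, cached and failure-tolerant.
final class PrivateCaller {
  static final class Target {
    private void hidden(String name) { /* ... */ }
  }

  private final Method hidden;

  PrivateCaller() {
    Method m;
    try {
      m = Target.class.getDeclaredMethod("hidden", String.class);
      m.setAccessible(true); // bypass the private modifier
    } catch (NoSuchMethodException e) {
      m = null; // method not there in this version; calls become no-ops
    }
    hidden = m;
  }

  boolean call(Target target, String name) {
    if (hidden == null) {
      return false;
    }
    try {
      hidden.invoke(target, name);
      return true;
    } catch (Exception e) {
      return false; // swallow, as the helper above does
    }
  }
}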
- */ -@InterfaceAudience.Private -public class DynamicMetricsRegistry { - private static final Log LOG = LogFactory.getLog(DynamicMetricsRegistry.class); - - private final ConcurrentMap metricsMap = - Maps.newConcurrentMap(); - private final ConcurrentMap tagsMap = - Maps.newConcurrentMap(); - private final MetricsInfo metricsInfo; - private final DefaultMetricsSystemHelper helper = new DefaultMetricsSystemHelper(); - private final static String[] histogramSuffixes = new String[]{ - "_num_ops", - "_min", - "_max", - "_median", - "_75th_percentile", - "_90th_percentile", - "_95th_percentile", - "_99th_percentile"}; - - /** - * Construct the registry with a record name - * @param name of the record of the metrics - */ - public DynamicMetricsRegistry(String name) { - this(Interns.info(name,name)); - } - - /** - * Construct the registry with a metadata object - * @param info the info object for the metrics record/group - */ - public DynamicMetricsRegistry(MetricsInfo info) { - metricsInfo = info; - } - - /** - * @return the info object of the metrics registry - */ - public MetricsInfo info() { - return metricsInfo; - } - - /** - * Get a metric by name - * @param name of the metric - * @return the metric object - */ - public MutableMetric get(String name) { - return metricsMap.get(name); - } - - /** - * Get a tag by name - * @param name of the tag - * @return the tag object - */ - public MetricsTag getTag(String name) { - return tagsMap.get(name); - } - - /** - * Create a mutable integer counter - * @param name of the metric - * @param desc metric description - * @param iVal initial value - * @return a new counter object - */ - public MutableCounterInt newCounter(String name, String desc, int iVal) { - return newCounter(new MetricsInfoImpl(name, desc), iVal); - } - - /** - * Create a mutable integer counter - * @param info metadata of the metric - * @param iVal initial value - * @return a new counter object - */ - public MutableCounterInt newCounter(MetricsInfo info, int iVal) { - MutableCounterInt ret = new MutableCounterInt(info, iVal); - return addNewMetricIfAbsent(info.name(), ret, MutableCounterInt.class); - } - - /** - * Create a mutable long integer counter - * @param name of the metric - * @param desc metric description - * @param iVal initial value - * @return a new counter object - */ - public MutableCounterLong newCounter(String name, String desc, long iVal) { - return newCounter(new MetricsInfoImpl(name, desc), iVal); - } - - /** - * Create a mutable long integer counter - * @param info metadata of the metric - * @param iVal initial value - * @return a new counter object - */ - public MutableCounterLong newCounter(MetricsInfo info, long iVal) { - MutableCounterLong ret = new MutableCounterLong(info, iVal); - return addNewMetricIfAbsent(info.name(), ret, MutableCounterLong.class); - } - - /** - * Create a mutable integer gauge - * @param name of the metric - * @param desc metric description - * @param iVal initial value - * @return a new gauge object - */ - public MutableGaugeInt newGauge(String name, String desc, int iVal) { - return newGauge(new MetricsInfoImpl(name, desc), iVal); - } - /** - * Create a mutable integer gauge - * @param info metadata of the metric - * @param iVal initial value - * @return a new gauge object - */ - public MutableGaugeInt newGauge(MetricsInfo info, int iVal) { - MutableGaugeInt ret = new MutableGaugeInt(info, iVal); - return addNewMetricIfAbsent(info.name(), ret, MutableGaugeInt.class); - } - - /** - * Create a mutable long integer gauge - * @param 
name of the metric - * @param desc metric description - * @param iVal initial value - * @return a new gauge object - */ - public MutableGaugeLong newGauge(String name, String desc, long iVal) { - return newGauge(new MetricsInfoImpl(name, desc), iVal); - } - - /** - * Create a mutable long integer gauge - * @param info metadata of the metric - * @param iVal initial value - * @return a new gauge object - */ - public MutableGaugeLong newGauge(MetricsInfo info, long iVal) { - MutableGaugeLong ret = new MutableGaugeLong(info, iVal); - return addNewMetricIfAbsent(info.name(), ret, MutableGaugeLong.class); - } - - /** - * Create a mutable metric with stats - * @param name of the metric - * @param desc metric description - * @param sampleName of the metric (e.g., "Ops") - * @param valueName of the metric (e.g., "Time" or "Latency") - * @param extended produce extended stat (stdev, min/max etc.) if true. - * @return a new mutable stat metric object - */ - public MutableStat newStat(String name, String desc, - String sampleName, String valueName, boolean extended) { - MutableStat ret = - new MutableStat(name, desc, sampleName, valueName, extended); - return addNewMetricIfAbsent(name, ret, MutableStat.class); - } - - /** - * Create a mutable metric with stats - * @param name of the metric - * @param desc metric description - * @param sampleName of the metric (e.g., "Ops") - * @param valueName of the metric (e.g., "Time" or "Latency") - * @return a new mutable metric object - */ - public MutableStat newStat(String name, String desc, - String sampleName, String valueName) { - return newStat(name, desc, sampleName, valueName, false); - } - - /** - * Create a mutable rate metric - * @param name of the metric - * @return a new mutable metric object - */ - public MutableRate newRate(String name) { - return newRate(name, name, false); - } - - /** - * Create a mutable rate metric - * @param name of the metric - * @param description of the metric - * @return a new mutable rate metric object - */ - public MutableRate newRate(String name, String description) { - return newRate(name, description, false); - } - - /** - * Create a mutable rate metric (for throughput measurement) - * @param name of the metric - * @param desc description - * @param extended produce extended stat (stdev/min/max etc.) if true - * @return a new mutable rate metric object - */ - public MutableRate newRate(String name, String desc, boolean extended) { - return newRate(name, desc, extended, true); - } - - @InterfaceAudience.Private - public MutableRate newRate(String name, String desc, - boolean extended, boolean returnExisting) { - if (returnExisting) { - MutableMetric rate = metricsMap.get(name); - if (rate != null) { - if (rate instanceof MutableRate) return (MutableRate) rate; - throw new MetricsException("Unexpected metrics type "+ rate.getClass() - +" for "+ name); - } - } - MutableRate ret = new MutableRate(name, desc, extended); - return addNewMetricIfAbsent(name, ret, MutableRate.class); - } - - /** - * Create a new histogram. - * @param name Name of the histogram. - * @return A new MutableHistogram - */ - public MutableHistogram newHistogram(String name) { - return newHistogram(name, ""); - } - - /** - * Create a new histogram. - * @param name The name of the histogram - * @param desc The description of the data in the histogram. 
- * @return A new MutableHistogram - */ - public MutableHistogram newHistogram(String name, String desc) { - MutableHistogram histo = new MutableHistogram(name, desc); - return addNewMetricIfAbsent(name, histo, MutableHistogram.class); - } - - /** - * Create a new histogram with time range counts. - * @param name Name of the histogram. - * @return A new MutableTimeHistogram - */ - public MutableTimeHistogram newTimeHistogram(String name) { - return newTimeHistogram(name, ""); - } - - /** - * Create a new histogram with time range counts. - * @param name The name of the histogram - * @param desc The description of the data in the histogram. - * @return A new MutableTimeHistogram - */ - public MutableTimeHistogram newTimeHistogram(String name, String desc) { - MutableTimeHistogram histo = new MutableTimeHistogram(name, desc); - return addNewMetricIfAbsent(name, histo, MutableTimeHistogram.class); - } - - /** - * Create a new histogram with size range counts. - * @param name Name of the histogram. - * @return A new MutableSizeHistogram - */ - public MutableSizeHistogram newSizeHistogram(String name) { - return newSizeHistogram(name, ""); - } - - /** - * Create a new histogram with size range counts. - * @param name The name of the histogram - * @param desc The description of the data in the histogram. - * @return A new MutableSizeHistogram - */ - public MutableSizeHistogram newSizeHistogram(String name, String desc) { - MutableSizeHistogram histo = new MutableSizeHistogram(name, desc); - return addNewMetricIfAbsent(name, histo, MutableSizeHistogram.class); - } - - /** - * Create a new MetricMutableQuantiles (a more accurate histogram). - * @param name The name of the histogram - * @return a new MetricMutableQuantiles - */ - public MetricMutableQuantiles newQuantile(String name) { - return newQuantile(name, ""); - } - - public MetricMutableQuantiles newQuantile(String name, String desc) { - MetricMutableQuantiles histo = new MetricMutableQuantiles(name, desc, "Ops", "", 60); - return addNewMetricIfAbsent(name, histo, MetricMutableQuantiles.class); - } - - synchronized void add(String name, MutableMetric metric) { - addNewMetricIfAbsent(name, metric, MutableMetric.class); - } - - /** - * Add sample to a stat metric by name.
- * @param name of the metric - * @param value of the snapshot to add - */ - public void add(String name, long value) { - MutableMetric m = metricsMap.get(name); - - if (m != null) { - if (m instanceof MutableStat) { - ((MutableStat) m).add(value); - } - else { - throw new MetricsException("Unsupported add(value) for metric "+ name); - } - } - else { - metricsMap.put(name, newRate(name)); // default is a rate metric - add(name, value); - } - } - - /** - * Set the metrics context tag - * @param name of the context - * @return the registry itself as a convenience - */ - public DynamicMetricsRegistry setContext(String name) { - return tag(MsInfo.Context, name, true); - } - - /** - * Add a tag to the metrics - * @param name of the tag - * @param description of the tag - * @param value of the tag - * @return the registry (to keep adding tags) - */ - public DynamicMetricsRegistry tag(String name, String description, String value) { - return tag(name, description, value, false); - } - - /** - * Add a tag to the metrics - * @param name of the tag - * @param description of the tag - * @param value of the tag - * @param override existing tag if true - * @return the registry (to keep adding tags) - */ - public DynamicMetricsRegistry tag(String name, String description, String value, - boolean override) { - return tag(new MetricsInfoImpl(name, description), value, override); - } - - /** - * Add a tag to the metrics - * @param info metadata of the tag - * @param value of the tag - * @param override existing tag if true - * @return the registry (to keep adding tags etc.) - */ - public DynamicMetricsRegistry tag(MetricsInfo info, String value, boolean override) { - MetricsTag tag = Interns.tag(info, value); - - if (!override) { - MetricsTag existing = tagsMap.putIfAbsent(info.name(), tag); - if (existing != null) { - throw new MetricsException("Tag "+ info.name() +" already exists!"); - } - return this; - } - - tagsMap.put(info.name(), tag); - - return this; - } - - public DynamicMetricsRegistry tag(MetricsInfo info, String value) { - return tag(info, value, false); - } - - Collection<MetricsTag> tags() { - return tagsMap.values(); - } - - Collection<MutableMetric> metrics() { - return metricsMap.values(); - } - - /** - * Sample all the mutable metrics and put the snapshot in the builder - * @param builder to contain the metrics snapshot - * @param all get all the metrics even if the values are not changed. - */ - public void snapshot(MetricsRecordBuilder builder, boolean all) { - for (MetricsTag tag : tags()) { - builder.add(tag); - } - for (MutableMetric metric : metrics()) { - metric.snapshot(builder, all); - } - } - - @Override public String toString() { - return Objects.toStringHelper(this) - .add("info", metricsInfo).add("tags", tags()).add("metrics", metrics()) - .toString(); - } - - /** - * Removes metric by name - * @param name name of the metric to remove - */ - public void removeMetric(String name) { - helper.removeObjectName(name); - metricsMap.remove(name); - } - - public void removeHistogramMetrics(String baseName) { - for (String suffix:histogramSuffixes) { - removeMetric(baseName+suffix); - } - } - - /** - * Get a MetricMutableGaugeLong from the storage. If it is not there atomically put it. - * - * @param gaugeName name of the gauge to create or get. - * @param potentialStartingValue value of the new gauge if we have to create it. - */ - public MutableGaugeLong getLongGauge(String gaugeName, long potentialStartingValue) { - //Try and get the gauge.
- MutableMetric metric = metricsMap.get(gaugeName); - - //If it's not there then try and put a new one in the storage. - if (metric == null) { - - //Create the potential new gauge. - MutableGaugeLong newGauge = new MutableGaugeLong(new MetricsInfoImpl(gaugeName, ""), - potentialStartingValue); - - // Try and put the gauge in. This is atomic. - metric = metricsMap.putIfAbsent(gaugeName, newGauge); - - //If the value we get back is null then the put was successful and we will return that. - //otherwise metric should contain the instance that was already registered. - if (metric == null) { - return newGauge; - } - } - - if (!(metric instanceof MutableGaugeLong)) { - throw new MetricsException("Metric already exists in registry for metric name: " + gaugeName + - " and not of type MetricMutableGaugeLong"); - } - - return (MutableGaugeLong) metric; - } - - /** - * Get a MetricMutableCounterLong from the storage. If it is not there atomically put it. - * - * @param counterName Name of the counter to get - * @param potentialStartingValue starting value if we have to create a new counter - */ - public MutableCounterLong getLongCounter(String counterName, long potentialStartingValue) { - //See getLongGauge for description on how this works. - MutableMetric counter = metricsMap.get(counterName); - if (counter == null) { - MutableCounterLong newCounter = - new MutableCounterLong(new MetricsInfoImpl(counterName, ""), potentialStartingValue); - counter = metricsMap.putIfAbsent(counterName, newCounter); - if (counter == null) { - return newCounter; - } - } - - - if (!(counter instanceof MutableCounterLong)) { - throw new MetricsException("Metric already exists in registry for metric name: " + - counterName + " and not of type MetricMutableCounterLong"); - } - - return (MutableCounterLong) counter; - } - - public MutableHistogram getHistogram(String histoName) { - //See getLongGauge for description on how this works. - MutableMetric histo = metricsMap.get(histoName); - if (histo == null) { - MutableHistogram newCounter = - new MutableHistogram(new MetricsInfoImpl(histoName, "")); - histo = metricsMap.putIfAbsent(histoName, newCounter); - if (histo == null) { - return newCounter; - } - } - - - if (!(histo instanceof MutableHistogram)) { - throw new MetricsException("Metric already exists in registry for metric name: " + - histoName + " and not of type MutableHistogram"); - } - - return (MutableHistogram) histo; - } - - public MetricMutableQuantiles getQuantile(String histoName) { - //See getLongGauge for description on how this works. - MutableMetric histo = metricsMap.get(histoName); - if (histo == null) { - MetricMutableQuantiles newCounter = - new MetricMutableQuantiles(histoName, "", "Ops", "", 60); - histo = metricsMap.putIfAbsent(histoName, newCounter); - if (histo == null) { - return newCounter; - } - } - - - if (!(histo instanceof MetricMutableQuantiles)) { - throw new MetricsException("Metric already exists in registry for metric name: " + - histoName + " and not of type MetricMutableQuantiles"); - } - - return (MetricMutableQuantiles) histo; - } - - private <T extends MutableMetric> T - addNewMetricIfAbsent(String name, - T ret, - Class<T> metricClass) { - //If the value we get back is null then the put was successful and we will - // return that. Otherwise metric should contain the thing that was in - // before the put could be completed. - MutableMetric metric = metricsMap.putIfAbsent(name, ret); - if (metric == null) { - return ret; - } - - return returnExistingWithCast(metric, metricClass, name); - } - - @SuppressWarnings("unchecked") - private <T> T returnExistingWithCast(MutableMetric metric, - Class<T> metricClass, String name) { - if (!metricClass.isAssignableFrom(metric.getClass())) { - throw new MetricsException("Metric already exists in registry for metric name: " + - name + " and not of type " + metricClass + - " but instead of type " + metric.getClass()); - } - - return (T) metric; - } - - public void clearMetrics() { - for (String name:metricsMap.keySet()) { - helper.removeObjectName(name); - } - metricsMap.clear(); - } -}
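All of the lookup methods above share one lock-free idiom: optimistic get, atomic putIfAbsent, then keep whichever instance won the race. A stripped-down, self-contained sketch of that idiom, with AtomicLong standing in for MutableMetric so it compiles on its own:

    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.ConcurrentMap;
    import java.util.concurrent.atomic.AtomicLong;

    class GetOrCreateSketch {
      private final ConcurrentMap<String, AtomicLong> map =
          new ConcurrentHashMap<String, AtomicLong>();

      AtomicLong getOrCreate(String name) {
        AtomicLong metric = map.get(name);                  // optimistic read
        if (metric == null) {
          AtomicLong created = new AtomicLong();
          AtomicLong raced = map.putIfAbsent(name, created); // atomic claim
          metric = (raced == null) ? created : raced;       // keep the winner
        }
        return metric;
      }
    }

The loser of the race simply discards its candidate object; no locking is needed because putIfAbsent guarantees exactly one winner per key.
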
- */ -@InterfaceAudience.Private -public class MetricMutableQuantiles extends MutableMetric implements MetricHistogram { - - static final MetricQuantile[] quantiles = {new MetricQuantile(0.50, 0.050), - new MetricQuantile(0.75, 0.025), new MetricQuantile(0.90, 0.010), - new MetricQuantile(0.95, 0.005), new MetricQuantile(0.99, 0.001)}; - - private final MetricsInfo numInfo; - private final MetricsInfo[] quantileInfos; - private final int interval; - - private MetricSampleQuantiles estimator; - private long previousCount = 0; - private MetricsExecutor executor; - - - @VisibleForTesting - protected Map<MetricQuantile, Long> previousSnapshot = null; - - /** - * Instantiates a new {@link MetricMutableQuantiles} for a metric that rolls itself over on the - * specified time interval. - * - * @param name of the metric - * @param description long-form textual description of the metric - * @param sampleName type of items in the stream (e.g., "Ops") - * @param valueName type of the values - * @param interval rollover interval (in seconds) of the estimator - */ - public MetricMutableQuantiles(String name, String description, String sampleName, - String valueName, int interval) { - String ucName = StringUtils.capitalize(name); - String usName = StringUtils.capitalize(sampleName); - String uvName = StringUtils.capitalize(valueName); - String desc = StringUtils.uncapitalize(description); - String lsName = StringUtils.uncapitalize(sampleName); - String lvName = StringUtils.uncapitalize(valueName); - - numInfo = info(ucName + "Num" + usName, String.format( - "Number of %s for %s with %ds interval", lsName, desc, interval)); - // Construct the MetricsInfos for the quantiles, converting to percentiles - quantileInfos = new MetricsInfo[quantiles.length]; - String nameTemplate = "%s%dthPercentile%dsInterval%s"; - String descTemplate = "%d percentile %s with %d second interval for %s"; - for (int i = 0; i < quantiles.length; i++) { - int percentile = (int) (100 * quantiles[i].quantile); - quantileInfos[i] = info(String.format(nameTemplate, ucName, percentile, interval, uvName), - String.format(descTemplate, percentile, lvName, interval, desc)); - } - - estimator = new MetricSampleQuantiles(quantiles); - executor = new MetricsExecutorImpl(); - this.interval = interval; - executor.getExecutor().scheduleAtFixedRate(new RolloverSample(this), - interval, - interval, - TimeUnit.SECONDS); - } - - @Override - public synchronized void snapshot(MetricsRecordBuilder builder, boolean all) { - if (all || changed()) { - builder.addGauge(numInfo, previousCount); - for (int i = 0; i < quantiles.length; i++) { - long newValue = 0; - // If snapshot is null, we failed to update since the window was empty - if (previousSnapshot != null) { - newValue = previousSnapshot.get(quantiles[i]); - } - builder.addGauge(quantileInfos[i], newValue); - } - if (changed()) { - clearChanged(); - } - } - } - - public synchronized void add(long value) { - estimator.insert(value); - } - - public int getInterval() { - return interval; - } - - /** Runnable used to periodically roll over the internal {@link org.apache.hadoop.metrics2.util.MetricSampleQuantiles} every interval. - */ - private static class RolloverSample implements Runnable { - - MetricMutableQuantiles parent; - - public RolloverSample(MetricMutableQuantiles parent) { - this.parent = parent; - } - - @Override - public void run() { - synchronized (parent) { - try { - parent.previousCount = parent.estimator.getCount(); - parent.previousSnapshot = parent.estimator.snapshot(); - } catch (IOException e) { - // Couldn't get a new snapshot because the window was empty - parent.previousCount = 0; - parent.previousSnapshot = null; - } - parent.estimator.clear(); - } - parent.setChanged(); - } - - } -}
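The class above never serves live estimator state: a scheduled task freezes the finished window into previousSnapshot and clears the estimator, so reporters always read a stable snapshot. A reduced sketch of that rollover pattern (class and field names are illustrative, not the patch's API):

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    class RollingWindow {
      private long liveCount;            // accumulates during the interval
      private volatile long published;   // what snapshot readers observe

      synchronized void add(long value) { liveCount++; }

      synchronized void rollover() {     // mirrors RolloverSample.run()
        published = liveCount;           // freeze the finished window
        liveCount = 0;                   // clear the live estimator
      }

      static RollingWindow start(int intervalSeconds) {
        final RollingWindow w = new RollingWindow();
        ScheduledExecutorService ses = Executors.newSingleThreadScheduledExecutor();
        ses.scheduleAtFixedRate(new Runnable() {
          @Override public void run() { w.rollover(); }
        }, intervalSeconds, intervalSeconds, TimeUnit.SECONDS);
        return w;
      }
    }
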
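Two details of the implementation above are worth calling out: the enum singleton yields exactly one scheduler per JVM without double-checked locking, and daemon threads keep that scheduler from pinning the process at shutdown. A condensed sketch of the same idiom (thread name illustrative):

    import java.util.concurrent.ScheduledThreadPoolExecutor;
    import java.util.concurrent.ThreadFactory;

    enum SchedulerHolder {
      INSTANCE;
      final ScheduledThreadPoolExecutor scheduler =
          new ScheduledThreadPoolExecutor(1, new ThreadFactory() {
            @Override public Thread newThread(Runnable r) {
              Thread t = new Thread(r, "Example-Metrics-Scheduler");
              t.setDaemon(true);   // daemon thread: does not block JVM exit
              return t;
            }
          });
    }
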
diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableHistogram.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableHistogram.java deleted file mode 100644 index c7ff940..0000000 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableHistogram.java +++ /dev/null @@ -1,141 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.metrics2.lib; - -import java.util.concurrent.atomic.AtomicLong; - -import org.apache.commons.lang.StringUtils; -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.metrics2.MetricHistogram; -import org.apache.hadoop.metrics2.MetricsInfo; -import org.apache.hadoop.metrics2.MetricsRecordBuilder; - -import com.codahale.metrics.ExponentiallyDecayingReservoir; -import com.codahale.metrics.Reservoir; -import com.codahale.metrics.Snapshot; - -/** - * A histogram implementation that runs in constant space, and exports to hadoop2's metrics2 system. - */ -@InterfaceAudience.Private -public class MutableHistogram extends MutableMetric implements MetricHistogram { - - private static final int DEFAULT_SAMPLE_SIZE = 2046; - // the bias towards sampling from more recent data. - // Per Cormode et al.
an alpha of 0.015 strongly biases to the last 5 minutes - private static final double DEFAULT_ALPHA = 0.015; - - protected final String name; - protected final String desc; - private final Reservoir reservoir; - private final AtomicLong min; - private final AtomicLong max; - private final AtomicLong sum; - private final AtomicLong count; - - public MutableHistogram(MetricsInfo info) { - this(info.name(), info.description()); - } - - public MutableHistogram(String name, String description) { - this.name = StringUtils.capitalize(name); - this.desc = StringUtils.uncapitalize(description); - reservoir = new ExponentiallyDecayingReservoir(DEFAULT_SAMPLE_SIZE, DEFAULT_ALPHA); - count = new AtomicLong(); - min = new AtomicLong(Long.MAX_VALUE); - max = new AtomicLong(Long.MIN_VALUE); - sum = new AtomicLong(); - } - - public void add(final long val) { - setChanged(); - count.incrementAndGet(); - reservoir.update(val); - setMax(val); - setMin(val); - sum.getAndAdd(val); - } - - private void setMax(final long potentialMax) { - boolean done = false; - while (!done) { - final long currentMax = max.get(); - done = currentMax >= potentialMax - || max.compareAndSet(currentMax, potentialMax); - } - } - - private void setMin(long potentialMin) { - boolean done = false; - while (!done) { - final long currentMin = min.get(); - done = currentMin <= potentialMin - || min.compareAndSet(currentMin, potentialMin); - } - } - - public long getMax() { - if (count.get() > 0) { - return max.get(); - } - return 0L; - } - - public long getMin() { - if (count.get() > 0) { - return min.get(); - } - return 0L; - } - - public double getMean() { - long cCount = count.get(); - if (cCount > 0) { - return sum.get() / (double) cCount; - } - return 0.0; - } - - @Override - public void snapshot(MetricsRecordBuilder metricsRecordBuilder, boolean all) { - if (all || changed()) { - clearChanged(); - updateSnapshotMetrics(metricsRecordBuilder); - } - } - - public void updateSnapshotMetrics(MetricsRecordBuilder metricsRecordBuilder) { - final Snapshot s = reservoir.getSnapshot(); - metricsRecordBuilder.addCounter(Interns.info(name + NUM_OPS_METRIC_NAME, desc), count.get()); - - metricsRecordBuilder.addGauge(Interns.info(name + MIN_METRIC_NAME, desc), getMin()); - metricsRecordBuilder.addGauge(Interns.info(name + MAX_METRIC_NAME, desc), getMax()); - metricsRecordBuilder.addGauge(Interns.info(name + MEAN_METRIC_NAME, desc), getMean()); - - metricsRecordBuilder.addGauge(Interns.info(name + MEDIAN_METRIC_NAME, desc), s.getMedian()); - metricsRecordBuilder.addGauge(Interns.info(name + SEVENTY_FIFTH_PERCENTILE_METRIC_NAME, desc), - s.get75thPercentile()); - metricsRecordBuilder.addGauge(Interns.info(name + NINETIETH_PERCENTILE_METRIC_NAME, desc), - s.getValue(0.90)); - metricsRecordBuilder.addGauge(Interns.info(name + NINETY_FIFTH_PERCENTILE_METRIC_NAME, desc), - s.get95thPercentile()); - metricsRecordBuilder.addGauge(Interns.info(name + NINETY_NINETH_PERCENTILE_METRIC_NAME, desc), - s.get99thPercentile()); - } -}
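setMax() and setMin() above are classic lock-free compare-and-set loops: retry until either no update is needed or our CAS wins. The same idiom in isolation (class name hypothetical):

    import java.util.concurrent.atomic.AtomicLong;

    final class RunningMax {
      private final AtomicLong max = new AtomicLong(Long.MIN_VALUE);

      void update(long sample) {
        boolean done = false;
        while (!done) {
          long current = max.get();
          // done if no update is needed, or if our CAS wins; a lost CAS retries
          done = current >= sample || max.compareAndSet(current, sample);
        }
      }

      long get() { return max.get(); }
    }
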
diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableRangeHistogram.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableRangeHistogram.java deleted file mode 100644 index ac1f497..0000000 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableRangeHistogram.java +++ /dev/null @@ -1,94 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.metrics2.lib; - -import java.util.concurrent.atomic.AtomicLongArray; - -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.metrics2.MetricsInfo; -import org.apache.hadoop.metrics2.MetricsRecordBuilder; - -/** - * Extended histogram implementation with metric range counters. - */ -@InterfaceAudience.Private -public abstract class MutableRangeHistogram extends MutableHistogram { - - public MutableRangeHistogram(MetricsInfo info) { - this(info.name(), info.description()); - } - - public MutableRangeHistogram(String name, String description) { - super(name, description); - } - - /** - * Returns the type of range histogram size or time - */ - public abstract String getRangeType(); - - /** - * Returns the ranges to be counted - */ - public abstract long[] getRange(); - - /** - * Returns the range counts - */ - public abstract AtomicLongArray getRangeVals(); - - @Override - public void add(final long val) { - super.add(val); - updateBand(val); - } - - private void updateBand(final long val) { - int i; - for (i=0; i < getRange().length && val > getRange()[i]; i++); - getRangeVals().incrementAndGet(i); - } - - @Override - public void snapshot(MetricsRecordBuilder metricsRecordBuilder, boolean all) { - if (all || changed()) { - clearChanged(); - updateSnapshotMetrics(metricsRecordBuilder); - updateSnapshotRangeMetrics(metricsRecordBuilder); - } - } - - public void updateSnapshotRangeMetrics(MetricsRecordBuilder metricsRecordBuilder) { - long prior = 0; - for (int i = 0; i < getRange().length; i++) { - long val = getRangeVals().get(i); - if (val > 0) { - metricsRecordBuilder.addCounter( - Interns.info(name + "_" + getRangeType() + "_" + prior + "-" + getRange()[i], desc), val); - } - prior = getRange()[i]; - } - long val = getRangeVals().get(getRange().length); - if (val > 0) { - metricsRecordBuilder.addCounter( - Interns.info(name + "_" + getRangeType() + "_" + getRange()[getRange().length - 1] + "-inf", desc), - getRangeVals().get(getRange().length)); - } - } -}
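updateBand() counts each sample into the band of the first boundary it does not exceed, with one overflow slot past the last boundary feeding the "-inf" counter in the snapshot. The same banding in a self-contained sketch (boundaries are examples, not the patch's):

    import java.util.concurrent.atomic.AtomicLongArray;

    final class BandCounter {
      private final long[] bounds = {10, 100, 1000};
      // one extra slot past the last boundary catches everything larger
      private final AtomicLongArray counts = new AtomicLongArray(bounds.length + 1);

      void add(long val) {
        int i;
        for (i = 0; i < bounds.length && val > bounds[i]; i++);
        counts.incrementAndGet(i);  // val <= bounds[i], or the overflow slot
      }
    }
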
diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableSizeHistogram.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableSizeHistogram.java deleted file mode 100644 index 2f1d57a..0000000 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableSizeHistogram.java +++ /dev/null @@ -1,57 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.metrics2.lib; - -import java.util.concurrent.atomic.AtomicLongArray; - -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.metrics2.MetricsInfo; - -/** - * Extended histogram implementation with counters for metric size ranges. - */ -@InterfaceAudience.Private -public class MutableSizeHistogram extends MutableRangeHistogram { - private final String rangeType = "SizeRangeCount"; - private final long[] ranges = {10,100,1000,10000,100000,1000000,10000000,100000000}; - private final AtomicLongArray rangeVals = new AtomicLongArray(getRange().length+1); - - public MutableSizeHistogram(MetricsInfo info) { - this(info.name(), info.description()); - } - - public MutableSizeHistogram(String name, String description) { - super(name, description); - } - - @Override - public String getRangeType() { - return rangeType; - } - - @Override - public long[] getRange() { - return ranges; - } - - @Override - public AtomicLongArray getRangeVals() { - return rangeVals; - } -} diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableTimeHistogram.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableTimeHistogram.java deleted file mode 100644 index 32d4fae..0000000 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableTimeHistogram.java +++ /dev/null @@ -1,58 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.metrics2.lib; - -import java.util.concurrent.atomic.AtomicLongArray; - -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.metrics2.MetricsInfo; - -/** - * Extended histogram implementation with counters for metric time ranges.
- */ -@InterfaceAudience.Private -public class MutableTimeHistogram extends MutableRangeHistogram { - private final String rangeType = "TimeRangeCount"; - private final long[] ranges = - { 1, 3, 10, 30, 100, 300, 1000, 3000, 10000, 30000, 60000, 120000, 300000, 600000 }; - private final AtomicLongArray rangeVals = new AtomicLongArray(ranges.length+1); - - public MutableTimeHistogram(MetricsInfo info) { - this(info.name(), info.description()); - } - - public MutableTimeHistogram(String name, String description) { - super(name, description); - } - - @Override - public String getRangeType() { - return rangeType; - } - - @Override - public long[] getRange() { - return ranges; - } - - @Override - public AtomicLongArray getRangeVals() { - return rangeVals; - } -} diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/util/MetricQuantile.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/util/MetricQuantile.java deleted file mode 100644 index 1ec75e0..0000000 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/util/MetricQuantile.java +++ /dev/null @@ -1,60 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.metrics2.util; - -import org.apache.hadoop.hbase.classification.InterfaceAudience; - -/** - * Specifies a quantile (with error bounds) to be watched by a - * {@link MetricSampleQuantiles} object. - */ -@InterfaceAudience.Private -public class MetricQuantile { - public final double quantile; - public final double error; - - public MetricQuantile(double quantile, double error) { - this.quantile = quantile; - this.error = error; - } - - @Override - public boolean equals(Object aThat) { - if (this == aThat) { - return true; - } - if (!(aThat instanceof MetricQuantile)) { - return false; - } - - MetricQuantile that = (MetricQuantile) aThat; - - long qbits = Double.doubleToLongBits(quantile); - long ebits = Double.doubleToLongBits(error); - - return qbits == Double.doubleToLongBits(that.quantile) - && ebits == Double.doubleToLongBits(that.error); - } - - @Override - public int hashCode() { - return (int) (Double.doubleToLongBits(quantile) ^ Double - .doubleToLongBits(error)); - } -}
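The equals()/hashCode() pair above compares bit patterns via Double.doubleToLongBits rather than ==. That keeps NaN values self-equal and the equals/hashCode contract intact, which matters because MetricQuantile is used as a map key in the estimator's snapshot. A two-line demonstration:

    public class BitsEqualityDemo {
      public static void main(String[] args) {
        double a = Double.NaN;
        double b = Double.NaN;
        System.out.println(a == b);                 // false under IEEE 754
        System.out.println(Double.doubleToLongBits(a)
            == Double.doubleToLongBits(b));         // true: same bit pattern
      }
    }
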
diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/util/MetricSampleQuantiles.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/util/MetricSampleQuantiles.java deleted file mode 100644 index 96c79ae..0000000 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/util/MetricSampleQuantiles.java +++ /dev/null @@ -1,310 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.metrics2.util; - -import java.io.IOException; -import java.util.Arrays; -import java.util.HashMap; -import java.util.LinkedList; -import java.util.ListIterator; -import java.util.Map; - -import org.apache.hadoop.hbase.classification.InterfaceAudience; - -import com.google.common.annotations.VisibleForTesting; - -/** - * Implementation of the Cormode, Korn, Muthukrishnan, and Srivastava algorithm - * for streaming calculation of targeted high-percentile epsilon-approximate - * quantiles. - * - * This is a generalization of the earlier work by Greenwald and Khanna (GK), - * which essentially allows different error bounds on the targeted quantiles, - * which allows for far more efficient calculation of high-percentiles. - * - * See: Cormode, Korn, Muthukrishnan, and Srivastava - * "Effective Computation of Biased Quantiles over Data Streams" in ICDE 2005 - * - * Greenwald and Khanna, - * "Space-efficient online computation of quantile summaries" in SIGMOD 2001 - * - */ -@InterfaceAudience.Private -public class MetricSampleQuantiles { - - /** - * Total number of items in stream - */ - private long count = 0; - - /** - * Current list of sampled items, maintained in sorted order with error bounds - */ - private LinkedList<SampleItem> samples; - - /** - * Buffers incoming items to be inserted in batch. Items are inserted into - * the buffer linearly. When the buffer fills, it is flushed into the samples - * array in its entirety. - */ - private long[] buffer = new long[500]; - private int bufferCount = 0; - - /** - * Array of Quantiles that we care about, along with desired error. - */ - private final MetricQuantile quantiles[]; - - public MetricSampleQuantiles(MetricQuantile[] quantiles) { - this.quantiles = Arrays.copyOf(quantiles, quantiles.length); - this.samples = new LinkedList<SampleItem>(); - } - - /** - * Specifies the allowable error for this rank, depending on which quantiles - * are being targeted. - * - * This is the f(r_i, n) function from the CKMS paper. It's basically how wide - * the range of this rank can be. - * - * @param rank - * the index in the list of samples - */ - private double allowableError(int rank) { - int size = samples.size(); - double minError = size + 1; - for (MetricQuantile q : quantiles) { - double error; - if (rank <= q.quantile * size) { - error = (2.0 * q.error * (size - rank)) / (1.0 - q.quantile); - } else { - error = (2.0 * q.error * rank) / q.quantile; - } - if (error < minError) { - minError = error; - } - } - - return minError; - } - - /** - * Add a new value from the stream. - * - * @param v the value to insert - */ - synchronized public void insert(long v) { - buffer[bufferCount] = v; - bufferCount++; - - count++; - - if (bufferCount == buffer.length) { - insertBatch(); - compress(); - } - }
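allowableError() is where the "biased quantiles" idea lives: tolerance is widest far from any targeted quantile and narrowest right at it, so compression keeps detail exactly where high-percentile queries need it. A small worked example of the f(r, n) width for the 99th-percentile target used above (sample counts chosen for illustration):

    public class AllowableErrorDemo {
      // Mirrors the two-branch width formula from allowableError() for one target.
      static double width(double quantile, double error, int size, int rank) {
        return rank <= quantile * size
            ? (2.0 * error * (size - rank)) / (1.0 - quantile)
            : (2.0 * error * rank) / quantile;
      }

      public static void main(String[] args) {
        // q=0.99, eps=0.001, 1000 samples:
        System.out.println(width(0.99, 0.001, 1000, 500)); // 100.0 ranks of slack mid-stream
        System.out.println(width(0.99, 0.001, 1000, 995)); // ~2.01 ranks near the target
      }
    }
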
- - /** - * Merges items from buffer into the samples array in one pass. - * This is more efficient than doing an insert on every item. - */ - private void insertBatch() { - if (bufferCount == 0) { - return; - } - - Arrays.sort(buffer, 0, bufferCount); - - // Base case: no samples - int start = 0; - if (samples.size() == 0) { - SampleItem newItem = new SampleItem(buffer[0], 1, 0); - samples.add(newItem); - start++; - } - - ListIterator<SampleItem> it = samples.listIterator(); - SampleItem item = it.next(); - for (int i = start; i < bufferCount; i++) { - long v = buffer[i]; - while (it.nextIndex() < samples.size() && item.value < v) { - item = it.next(); - } - // If we found that bigger item, back up so we insert ourselves before it - if (item.value > v) { - it.previous(); - } - // We use different indexes for the edge comparisons, because of the above - // if statement that adjusts the iterator - int delta; - if (it.previousIndex() == 0 || it.nextIndex() == samples.size()) { - delta = 0; - } else { - delta = ((int) Math.floor(allowableError(it.nextIndex()))) - 1; - } - SampleItem newItem = new SampleItem(v, 1, delta); - it.add(newItem); - item = newItem; - } - - bufferCount = 0; - } - - /** - * Try to remove extraneous items from the set of sampled items. This checks - * if an item is unnecessary based on the desired error bounds, and merges it - * with the adjacent item if it is. - */ - private void compress() { - if (samples.size() < 2) { - return; - } - - ListIterator<SampleItem> it = samples.listIterator(); - SampleItem prev = null; - SampleItem next = it.next(); - - while (it.hasNext()) { - prev = next; - next = it.next(); - if (prev.g + next.g + next.delta <= allowableError(it.previousIndex())) { - next.g += prev.g; - // Remove prev. it.remove() kills the last thing returned. - it.previous(); - it.previous(); - it.remove(); - // it.next() is now equal to next, skip it back forward again - it.next(); - } - } - } - - /** - * Get the estimated value at the specified quantile. - * - * @param quantile Queried quantile, e.g. 0.50 or 0.99. - * @return Estimated value at that quantile. - */ - private long query(double quantile) throws IOException { - if (samples.size() == 0) { - throw new IOException("No samples present"); - } - - int rankMin = 0; - int desired = (int) (quantile * count); - - for (int i = 1; i < samples.size(); i++) { - SampleItem prev = samples.get(i - 1); - SampleItem cur = samples.get(i); - - rankMin += prev.g; - - if (rankMin + cur.g + cur.delta > desired + (allowableError(i) / 2)) { - return prev.value; - } - } - - // edge case of wanting max value - return samples.get(samples.size() - 1).value; - } - - /** - * Get a snapshot of the current values of all the tracked quantiles.
- * - * @return snapshot of the tracked quantiles - * @throws IOException - * if no items have been added to the estimator - */ - synchronized public Map<MetricQuantile, Long> snapshot() throws IOException { - // flush the buffer first for best results - insertBatch(); - Map<MetricQuantile, Long> values = new HashMap<MetricQuantile, Long>(quantiles.length); - for (int i = 0; i < quantiles.length; i++) { - values.put(quantiles[i], query(quantiles[i].quantile)); - } - - return values; - } - - /** - * Returns the number of items that the estimator has processed - * - * @return count total number of items processed - */ - synchronized public long getCount() { - return count; - } - - /** - * Returns the number of samples kept by the estimator - * - * @return count current number of samples - */ - @VisibleForTesting - synchronized public int getSampleCount() { - return samples.size(); - } - - /** - * Resets the estimator, clearing out all previously inserted items - */ - synchronized public void clear() { - count = 0; - bufferCount = 0; - samples.clear(); - } - - /** - * Describes a measured value passed to the estimator, tracking additional - * metadata required by the CKMS algorithm. - */ - private static class SampleItem { - - /** - * Value of the sampled item (e.g. a measured latency value) - */ - public final long value; - - /** - * Difference between the lowest possible rank of the previous item, and - * the lowest possible rank of this item. - * - * The sum of the g of all previous items yields this item's lower bound. - */ - public int g; - - /** - * Difference between the item's greatest possible rank and lowest possible - * rank. - */ - public final int delta; - - public SampleItem(long value, int lowerDelta, int delta) { - this.value = value; - this.g = lowerDelta; - this.delta = delta; - } - - @Override - public String toString() { - return String.format("%d, %d, %d", value, g, delta); - } - } -}
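For reference, this is how the removed estimator was driven end to end: feed it raw samples, then ask for a snapshot of the targeted quantiles. The sample distribution is illustrative:

    import java.io.IOException;
    import java.util.Map;

    import org.apache.hadoop.metrics2.util.MetricQuantile;
    import org.apache.hadoop.metrics2.util.MetricSampleQuantiles;

    public class QuantilesDemo {
      public static void main(String[] args) throws IOException {
        MetricQuantile p99 = new MetricQuantile(0.99, 0.001);
        MetricSampleQuantiles est =
            new MetricSampleQuantiles(new MetricQuantile[] { p99 });
        for (long latency = 1; latency <= 10000; latency++) {
          est.insert(latency);         // stream in observed values
        }
        Map<MetricQuantile, Long> snap = est.snapshot();
        // ~9900 for uniform 1..10000, within the configured error bound
        System.out.println(snap.get(p99));
      }
    }
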
diff --git a/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.ipc.MetricsHBaseServerSourceFactory b/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.ipc.MetricsHBaseServerSourceFactory deleted file mode 100644 index acda6f4..0000000 --- a/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.ipc.MetricsHBaseServerSourceFactory +++ /dev/null @@ -1,18 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# -org.apache.hadoop.hbase.ipc.MetricsHBaseServerSourceFactoryImpl diff --git a/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.master.MetricsAssignmentManagerSource b/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.master.MetricsAssignmentManagerSource deleted file mode 100644 index b959147..0000000 --- a/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.master.MetricsAssignmentManagerSource +++ /dev/null @@ -1,18 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# -org.apache.hadoop.hbase.master.MetricsAssignmentManagerSourceImpl diff --git a/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.master.MetricsMasterFileSystemSource b/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.master.MetricsMasterFileSystemSource deleted file mode 100644 index 0ebd2fd..0000000 --- a/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.master.MetricsMasterFileSystemSource +++ /dev/null @@ -1,18 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# -org.apache.hadoop.hbase.master.MetricsMasterFilesystemSourceImpl \ No newline at end of file diff --git a/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.master.MetricsMasterProcSourceFactory b/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.master.MetricsMasterProcSourceFactory deleted file mode 100644 index 3896388..0000000 --- a/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.master.MetricsMasterProcSourceFactory +++ /dev/null @@ -1,18 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership.
The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# -org.apache.hadoop.hbase.master.MetricsMasterProcSourceFactoryImpl \ No newline at end of file diff --git a/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.master.MetricsMasterSourceFactory b/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.master.MetricsMasterSourceFactory deleted file mode 100644 index c42d892..0000000 --- a/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.master.MetricsMasterSourceFactory +++ /dev/null @@ -1,18 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# -org.apache.hadoop.hbase.master.MetricsMasterSourceFactoryImpl \ No newline at end of file diff --git a/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.master.MetricsSnapshotSource b/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.master.MetricsSnapshotSource deleted file mode 100644 index 133a079..0000000 --- a/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.master.MetricsSnapshotSource +++ /dev/null @@ -1,18 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
-# -org.apache.hadoop.hbase.master.MetricsSnapshotSourceImpl diff --git a/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.master.balancer.MetricsBalancerSource b/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.master.balancer.MetricsBalancerSource deleted file mode 100644 index c7bf32a..0000000 --- a/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.master.balancer.MetricsBalancerSource +++ /dev/null @@ -1,18 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# -org.apache.hadoop.hbase.master.balancer.MetricsBalancerSourceImpl \ No newline at end of file diff --git a/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.master.balancer.MetricsStochasticBalancerSource b/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.master.balancer.MetricsStochasticBalancerSource deleted file mode 100644 index 80c0895..0000000 --- a/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.master.balancer.MetricsStochasticBalancerSource +++ /dev/null @@ -1,18 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# -org.apache.hadoop.hbase.master.balancer.MetricsStochasticBalancerSourceImpl \ No newline at end of file diff --git a/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.metrics.MBeanSource b/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.metrics.MBeanSource deleted file mode 100644 index 4298f8b..0000000 --- a/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.metrics.MBeanSource +++ /dev/null @@ -1,18 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. 
The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# -org.apache.hadoop.hbase.metrics.MBeanSourceImpl diff --git a/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.regionserver.MetricsRegionServerSourceFactory b/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.regionserver.MetricsRegionServerSourceFactory deleted file mode 100644 index 7904900..0000000 --- a/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.regionserver.MetricsRegionServerSourceFactory +++ /dev/null @@ -1,18 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# -org.apache.hadoop.hbase.regionserver.MetricsRegionServerSourceFactoryImpl diff --git a/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.regionserver.wal.MetricsEditsReplaySource b/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.regionserver.wal.MetricsEditsReplaySource deleted file mode 100644 index ed92e85..0000000 --- a/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.regionserver.wal.MetricsEditsReplaySource +++ /dev/null @@ -1,18 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
-# -org.apache.hadoop.hbase.regionserver.wal.MetricsEditsReplaySourceImpl diff --git a/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.regionserver.wal.MetricsWALSource b/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.regionserver.wal.MetricsWALSource deleted file mode 100644 index 4d92616..0000000 --- a/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.regionserver.wal.MetricsWALSource +++ /dev/null @@ -1,18 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# -org.apache.hadoop.hbase.regionserver.wal.MetricsWALSourceImpl diff --git a/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSource b/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSource deleted file mode 100644 index ecb15da..0000000 --- a/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSource +++ /dev/null @@ -1,18 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# -org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSourceImpl \ No newline at end of file diff --git a/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSourceFactory b/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSourceFactory deleted file mode 100644 index af7b783..0000000 --- a/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSourceFactory +++ /dev/null @@ -1,18 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. 
The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# -org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSourceFactoryImpl \ No newline at end of file diff --git a/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.rest.MetricsRESTSource b/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.rest.MetricsRESTSource deleted file mode 100644 index 0f8fcde..0000000 --- a/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.rest.MetricsRESTSource +++ /dev/null @@ -1,18 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# -org.apache.hadoop.hbase.rest.MetricsRESTSourceImpl \ No newline at end of file diff --git a/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.thrift.MetricsThriftServerSourceFactory b/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.thrift.MetricsThriftServerSourceFactory deleted file mode 100644 index 24c4d70..0000000 --- a/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.thrift.MetricsThriftServerSourceFactory +++ /dev/null @@ -1,18 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
-# -org.apache.hadoop.hbase.thrift.MetricsThriftServerSourceFactoryImpl \ No newline at end of file diff --git a/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.metrics2.MetricsExecutor b/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.metrics2.MetricsExecutor deleted file mode 100644 index 2bc96b4..0000000 --- a/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.metrics2.MetricsExecutor +++ /dev/null @@ -1,18 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# -org.apache.hadoop.metrics2.lib.MetricsExecutorImpl diff --git a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/HadoopShimsImpl.java b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/HadoopShimsImpl.java deleted file mode 100644 index ce142e8..0000000 --- a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/HadoopShimsImpl.java +++ /dev/null @@ -1,43 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase; - -import org.apache.hadoop.mapreduce.Job; -import org.apache.hadoop.mapreduce.TaskAttemptID; -import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl; - -/** - * Compatibility shim layer implementation for Hadoop-2. - */ -public class HadoopShimsImpl implements HadoopShims { - - /** - * Returns a TaskAttemptContext instance created from the given parameters. - * @param job an instance of o.a.h.mapreduce.Job - * @param taskId an identifier for the task attempt id. 
Should be parsable by - * TaskAttemptId.forName() - * @return a concrete TaskAttemptContext instance of o.a.h.mapreduce.TaskAttemptContext - */ - @Override - @SuppressWarnings("unchecked") - public <T, J> T createTestTaskAttemptContext(J job, String taskId) { - Job j = (Job)job; - return (T)new TaskAttemptContextImpl(j.getConfiguration(), TaskAttemptID.forName(taskId)); - } -} diff --git a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/master/TestMetricsMasterProcSourceImpl.java b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/master/TestMetricsMasterProcSourceImpl.java deleted file mode 100644 index 3fe60b8..0000000 --- a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/master/TestMetricsMasterProcSourceImpl.java +++ /dev/null @@ -1,46 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.master; - -import org.apache.hadoop.hbase.CompatibilitySingletonFactory; -import org.apache.hadoop.hbase.testclassification.MetricsTests; -import org.apache.hadoop.hbase.testclassification.SmallTests; -import org.junit.Test; -import org.junit.experimental.categories.Category; - -import static org.junit.Assert.assertSame; -import static org.junit.Assert.assertTrue; - -/** - * Test for MetricsMasterProcSourceImpl - */ -@Category({MetricsTests.class, SmallTests.class}) -public class TestMetricsMasterProcSourceImpl { - - @Test - public void testGetInstance() throws Exception { - MetricsMasterProcSourceFactory metricsMasterProcSourceFactory = CompatibilitySingletonFactory - .getInstance(MetricsMasterProcSourceFactory.class); - MetricsMasterProcSource masterProcSource = metricsMasterProcSourceFactory.create(null); - assertTrue(masterProcSource instanceof MetricsMasterProcSourceImpl); - assertSame(metricsMasterProcSourceFactory, - CompatibilitySingletonFactory.getInstance(MetricsMasterProcSourceFactory.class)); - } - -} diff --git a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/master/TestMetricsMasterSourceImpl.java b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/master/TestMetricsMasterSourceImpl.java deleted file mode 100644 index f9508f5..0000000 --- a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/master/TestMetricsMasterSourceImpl.java +++ /dev/null @@ -1,45 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License.
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.master; - -import org.apache.hadoop.hbase.CompatibilitySingletonFactory; -import org.apache.hadoop.hbase.testclassification.SmallTests; -import org.apache.hadoop.hbase.testclassification.MetricsTests; -import org.junit.Test; -import org.junit.experimental.categories.Category; - -import static org.junit.Assert.assertSame; -import static org.junit.Assert.assertTrue; - -/** - * Test for MetricsMasterSourceImpl - */ -@Category({MetricsTests.class, SmallTests.class}) -public class TestMetricsMasterSourceImpl { - - @Test - public void testGetInstance() throws Exception { - MetricsMasterSourceFactory metricsMasterSourceFactory = CompatibilitySingletonFactory - .getInstance(MetricsMasterSourceFactory.class); - MetricsMasterSource masterSource = metricsMasterSourceFactory.create(null); - assertTrue(masterSource instanceof MetricsMasterSourceImpl); - assertSame(metricsMasterSourceFactory, CompatibilitySingletonFactory.getInstance(MetricsMasterSourceFactory.class)); - } - -} diff --git a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/metrics/TestBaseSourceImpl.java b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/metrics/TestBaseSourceImpl.java deleted file mode 100644 index 7381fb9..0000000 --- a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/metrics/TestBaseSourceImpl.java +++ /dev/null @@ -1,88 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.hbase.metrics; - -import org.apache.hadoop.hbase.testclassification.SmallTests; -import org.apache.hadoop.hbase.testclassification.MetricsTests; -import org.apache.hadoop.metrics2.lib.MutableCounterLong; -import org.apache.hadoop.metrics2.lib.MutableGaugeLong; -import org.junit.BeforeClass; -import org.junit.Test; -import org.junit.experimental.categories.Category; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNull; - -/** - * Test of default BaseSource for hadoop 2 - */ -@Category({MetricsTests.class, SmallTests.class}) -public class TestBaseSourceImpl { - - private static BaseSourceImpl bmsi; - - @BeforeClass - public static void setUp() throws Exception { - bmsi = new BaseSourceImpl("TestName", "test description", "testcontext", "TestContext"); - } - - @Test - public void testSetGauge() throws Exception { - bmsi.setGauge("testset", 100); - assertEquals(100, ((MutableGaugeLong) bmsi.metricsRegistry.get("testset")).value()); - bmsi.setGauge("testset", 300); - assertEquals(300, ((MutableGaugeLong) bmsi.metricsRegistry.get("testset")).value()); - - } - - @Test - public void testIncGauge() throws Exception { - bmsi.incGauge("testincgauge", 100); - assertEquals(100, ((MutableGaugeLong) bmsi.metricsRegistry.get("testincgauge")).value()); - bmsi.incGauge("testincgauge", 100); - assertEquals(200, ((MutableGaugeLong) bmsi.metricsRegistry.get("testincgauge")).value()); - - } - - @Test - public void testDecGauge() throws Exception { - bmsi.decGauge("testdec", 100); - assertEquals(-100, ((MutableGaugeLong) bmsi.metricsRegistry.get("testdec")).value()); - bmsi.decGauge("testdec", 100); - assertEquals(-200, ((MutableGaugeLong) bmsi.metricsRegistry.get("testdec")).value()); - - } - - @Test - public void testIncCounters() throws Exception { - bmsi.incCounters("testinccounter", 100); - assertEquals(100, ((MutableCounterLong) bmsi.metricsRegistry.get("testinccounter")).value()); - bmsi.incCounters("testinccounter", 100); - assertEquals(200, ((MutableCounterLong) bmsi.metricsRegistry.get("testinccounter")).value()); - - } - - @Test - public void testRemoveMetric() throws Exception { - bmsi.setGauge("testrmgauge", 100); - bmsi.removeMetric("testrmgauge"); - assertNull(bmsi.metricsRegistry.get("testrmgauge")); - } - -} diff --git a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServerSourceImpl.java b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServerSourceImpl.java deleted file mode 100644 index 05a5522..0000000 --- a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServerSourceImpl.java +++ /dev/null @@ -1,54 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
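The deleted TestBaseSourceImpl pins down the registry semantics: setGauge overwrites, incGauge and decGauge adjust the stored value, incCounters only ever accumulates, and removeMetric unregisters the name. A self-contained sketch of those semantics using plain JDK types (the class and method names below mirror the test, not the real BaseSourceImpl API):

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicLong;

public class GaugeRegistrySketch {
  private final ConcurrentMap<String, AtomicLong> gauges = new ConcurrentHashMap<>();
  private final ConcurrentMap<String, AtomicLong> counters = new ConcurrentHashMap<>();

  public void setGauge(String name, long value) {
    // setGauge overwrites whatever value was there before
    gauges.computeIfAbsent(name, k -> new AtomicLong()).set(value);
  }

  public void incGauge(String name, long delta) {
    gauges.computeIfAbsent(name, k -> new AtomicLong()).addAndGet(delta);
  }

  public void decGauge(String name, long delta) {
    // a decrement is just a negative increment, so a fresh gauge goes negative
    incGauge(name, -delta);
  }

  public void incCounters(String name, long delta) {
    // counters only ever accumulate
    counters.computeIfAbsent(name, k -> new AtomicLong()).addAndGet(delta);
  }

  public void removeMetric(String name) {
    gauges.remove(name);
    counters.remove(name);
  }

  public Long getGauge(String name) {
    AtomicLong g = gauges.get(name);
    return g == null ? null : g.get(); // null once removed, as the test asserts
  }
}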
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.regionserver; - -import org.apache.hadoop.hbase.CompatibilitySingletonFactory; -import org.apache.hadoop.hbase.testclassification.SmallTests; -import org.apache.hadoop.hbase.testclassification.MetricsTests; -import org.junit.Test; -import org.junit.experimental.categories.Category; - -import static org.junit.Assert.assertSame; -import static org.junit.Assert.assertTrue; - -/** - * Test for MetricsRegionServerSourceImpl - */ -@Category({MetricsTests.class, SmallTests.class}) -public class TestMetricsRegionServerSourceImpl { - - @Test - public void testGetInstance() throws Exception { - MetricsRegionServerSourceFactory metricsRegionServerSourceFactory = - CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class); - MetricsRegionServerSource serverSource = - metricsRegionServerSourceFactory.createServer(null); - assertTrue(serverSource instanceof MetricsRegionServerSourceImpl); - assertSame(metricsRegionServerSourceFactory, - CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class)); - } - - - @Test(expected = RuntimeException.class) - public void testNoGetRegionServerMetricsSourceImpl() throws Exception { - // This should throw an exception because MetricsRegionServerSourceImpl should only - // be created by a factory. - CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceImpl.class); - } -} diff --git a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionSourceImpl.java b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionSourceImpl.java deleted file mode 100644 index 3088260..0000000 --- a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionSourceImpl.java +++ /dev/null @@ -1,142 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.hbase.regionserver; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotEquals; -import static org.junit.Assert.assertTrue; - - -import org.apache.hadoop.hbase.CompatibilitySingletonFactory; -import org.apache.hadoop.hbase.testclassification.SmallTests; -import org.apache.hadoop.hbase.testclassification.MetricsTests; -import org.junit.Test; -import org.junit.experimental.categories.Category; - -@Category({MetricsTests.class, SmallTests.class}) -public class TestMetricsRegionSourceImpl { - - @Test - public void testCompareToHashCodeEquals() throws Exception { - MetricsRegionServerSourceFactory fact = CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class); - - MetricsRegionSource one = fact.createRegion(new RegionWrapperStub("TEST")); - MetricsRegionSource oneClone = fact.createRegion(new RegionWrapperStub("TEST")); - MetricsRegionSource two = fact.createRegion(new RegionWrapperStub("TWO")); - - assertEquals(0, one.compareTo(oneClone)); - assertEquals(one.hashCode(), oneClone.hashCode()); - assertNotEquals(one, two); - - assertTrue( one.compareTo(two) != 0); - assertTrue( two.compareTo(one) != 0); - assertTrue( two.compareTo(one) != one.compareTo(two)); - assertTrue( two.compareTo(two) == 0); - } - - - @Test(expected = RuntimeException.class) - public void testNoGetRegionServerMetricsSourceImpl() throws Exception { - // This should throw an exception because MetricsRegionSourceImpl should only - // be created by a factory. - CompatibilitySingletonFactory.getInstance(MetricsRegionSource.class); - } - - static class RegionWrapperStub implements MetricsRegionWrapper { - - private String regionName; - - public RegionWrapperStub(String regionName) { - this.regionName = regionName; - } - - @Override - public String getTableName() { - return null; - } - - @Override - public String getNamespace() { - return null; - } - - @Override - public String getRegionName() { - return this.regionName; - } - - @Override - public long getNumStores() { - return 0; - } - - @Override - public long getNumStoreFiles() { - return 0; - } - - @Override - public long getMemstoreSize() { - return 0; - } - - @Override - public long getStoreFileSize() { - return 0; - } - - @Override - public long getReadRequestCount() { - return 0; - } - - @Override - public long getWriteRequestCount() { - return 0; - } - - @Override - public long getNumFilesCompacted() { - return 0; - } - - @Override - public long getNumBytesCompacted() { - return 0; - } - - @Override - public long getNumCompactionsCompleted() { - return 0; - } - - @Override - public int getRegionHashCode() { - return regionName.hashCode(); - } - - /** - * Always return 0 for testing - */ - @Override - public int getReplicaId() { - return 0; - } - } -} diff --git a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestMetricsWALSourceImpl.java b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestMetricsWALSourceImpl.java deleted file mode 100644 index ddfed45..0000000 --- a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestMetricsWALSourceImpl.java +++ /dev/null @@ -1,40 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
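The compareTo/hashCode test above relies on a MetricsRegionSource's identity being derived from the wrapped region. A minimal sketch of that contract, assuming identity is keyed on the region name (the real MetricsRegionSourceImpl keys its hashCode off MetricsRegionWrapper.getRegionHashCode(), as the stub shows):

import java.util.Objects;

public class RegionSourceIdentitySketch implements Comparable<RegionSourceIdentitySketch> {
  private final String regionName;

  public RegionSourceIdentitySketch(String regionName) {
    this.regionName = regionName;
  }

  @Override
  public int compareTo(RegionSourceIdentitySketch other) {
    // same region name compares equal; different names order consistently
    return regionName.compareTo(other.regionName);
  }

  @Override
  public boolean equals(Object o) {
    // keep equals consistent with compareTo, as the deleted test demands
    return o instanceof RegionSourceIdentitySketch
        && compareTo((RegionSourceIdentitySketch) o) == 0;
  }

  @Override
  public int hashCode() {
    return Objects.hashCode(regionName);
  }
}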
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.regionserver.wal; - -import org.apache.hadoop.hbase.CompatibilitySingletonFactory; -import org.apache.hadoop.hbase.testclassification.SmallTests; -import org.apache.hadoop.hbase.testclassification.MetricsTests; -import org.junit.Test; -import org.junit.experimental.categories.Category; - -import static org.junit.Assert.assertSame; -import static org.junit.Assert.assertTrue; - -@Category({MetricsTests.class, SmallTests.class}) -public class TestMetricsWALSourceImpl { - - @Test - public void testGetInstance() throws Exception { - MetricsWALSource walSource = - CompatibilitySingletonFactory.getInstance(MetricsWALSource.class); - assertTrue(walSource instanceof MetricsWALSourceImpl); - assertSame(walSource, - CompatibilitySingletonFactory.getInstance(MetricsWALSource.class)); - } -} diff --git a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestMetricsReplicationSourceFactoryImpl.java b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestMetricsReplicationSourceFactoryImpl.java deleted file mode 100644 index abbd49f..0000000 --- a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestMetricsReplicationSourceFactoryImpl.java +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hbase.replication.regionserver; - -import org.apache.hadoop.hbase.CompatibilitySingletonFactory; -import org.apache.hadoop.hbase.testclassification.SmallTests; -import org.apache.hadoop.hbase.testclassification.MetricsTests; -import org.junit.Test; -import org.junit.experimental.categories.Category; - -import static org.junit.Assert.*; - -@Category({MetricsTests.class, SmallTests.class}) -public class TestMetricsReplicationSourceFactoryImpl { - - - @Test - public void testGetInstance() throws Exception { - MetricsReplicationSourceFactory rms = CompatibilitySingletonFactory - .getInstance(MetricsReplicationSourceFactory.class); - assertTrue(rms instanceof MetricsReplicationSourceFactoryImpl); - } - -} diff --git a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestMetricsReplicationSourceImpl.java b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestMetricsReplicationSourceImpl.java deleted file mode 100644 index acbadbe..0000000 --- a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestMetricsReplicationSourceImpl.java +++ /dev/null @@ -1,41 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.replication.regionserver; - -import org.apache.hadoop.hbase.CompatibilitySingletonFactory; -import org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSource; -import org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSourceImpl; -import org.apache.hadoop.hbase.testclassification.SmallTests; -import org.apache.hadoop.hbase.testclassification.MetricsTests; -import org.junit.Test; -import org.junit.experimental.categories.Category; - -import static org.junit.Assert.assertTrue; - -@Category({MetricsTests.class, SmallTests.class}) -/** Test for MetricsReplicationSourceImpl */ -public class TestMetricsReplicationSourceImpl { - - @Test - public void testGetInstance() throws Exception { - MetricsReplicationSource rms = CompatibilitySingletonFactory - .getInstance(MetricsReplicationSource.class); - assertTrue(rms instanceof MetricsReplicationSourceImpl); - } -} diff --git a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/rest/TestMetricsRESTSourceImpl.java b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/rest/TestMetricsRESTSourceImpl.java deleted file mode 100644 index 8069388..0000000 --- a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/rest/TestMetricsRESTSourceImpl.java +++ /dev/null @@ -1,44 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.rest; - -import org.apache.hadoop.hbase.CompatibilitySingletonFactory; -import org.apache.hadoop.hbase.rest.MetricsRESTSource; -import org.apache.hadoop.hbase.rest.MetricsRESTSourceImpl; -import org.apache.hadoop.hbase.testclassification.SmallTests; -import org.apache.hadoop.hbase.testclassification.MetricsTests; -import org.junit.Test; -import org.junit.experimental.categories.Category; - -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertTrue; - -/** - * Test for hadoop 2's version of MetricsRESTSource - */ -@Category({MetricsTests.class, SmallTests.class}) -public class TestMetricsRESTSourceImpl { - - @Test - public void ensureCompatRegistered() throws Exception { - assertNotNull(CompatibilitySingletonFactory.getInstance(MetricsRESTSource.class)); - assertTrue(CompatibilitySingletonFactory.getInstance(MetricsRESTSource.class) instanceof MetricsRESTSourceImpl); - } - -} diff --git a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/test/MetricsAssertHelperImpl.java b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/test/MetricsAssertHelperImpl.java deleted file mode 100644 index 5503675..0000000 --- a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/test/MetricsAssertHelperImpl.java +++ /dev/null @@ -1,250 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.test; - -import org.apache.hadoop.hbase.metrics.BaseSource; -import org.apache.hadoop.metrics2.AbstractMetric; -import org.apache.hadoop.metrics2.MetricsCollector; -import org.apache.hadoop.metrics2.MetricsInfo; -import org.apache.hadoop.metrics2.MetricsRecordBuilder; -import org.apache.hadoop.metrics2.MetricsSource; -import org.apache.hadoop.metrics2.MetricsTag; -import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; - -import java.util.HashMap; -import java.util.Map; - -import static org.junit.Assert.*; - -/** - * A helper class that will allow tests to get into hadoop2's metrics2 values. 
- */ -public class MetricsAssertHelperImpl implements MetricsAssertHelper { - private Map<String, String> tags = new HashMap<String, String>(); - private Map<String, Number> gauges = new HashMap<String, Number>(); - private Map<String, Long> counters = new HashMap<String, Long>(); - - public class MockMetricsBuilder implements MetricsCollector { - - @Override - public MetricsRecordBuilder addRecord(String s) { - return new MockRecordBuilder(this); - } - - @Override - public MetricsRecordBuilder addRecord(MetricsInfo metricsInfo) { - return new MockRecordBuilder(this); - } - } - - public class MockRecordBuilder extends MetricsRecordBuilder { - - private final MetricsCollector mockMetricsBuilder; - - public MockRecordBuilder(MetricsCollector mockMetricsBuilder) { - - this.mockMetricsBuilder = mockMetricsBuilder; - } - - @Override - public MetricsRecordBuilder tag(MetricsInfo metricsInfo, String s) { - - tags.put(canonicalizeMetricName(metricsInfo.name()), s); - return this; - } - - @Override - public MetricsRecordBuilder add(MetricsTag metricsTag) { - tags.put(canonicalizeMetricName(metricsTag.name()), metricsTag.value()); - return this; - } - - @Override - public MetricsRecordBuilder add(AbstractMetric abstractMetric) { - gauges.put(canonicalizeMetricName(abstractMetric.name()), abstractMetric.value()); - return this; - } - - @Override - public MetricsRecordBuilder setContext(String s) { - return this; - } - - @Override - public MetricsRecordBuilder addCounter(MetricsInfo metricsInfo, int i) { - counters.put(canonicalizeMetricName(metricsInfo.name()), Long.valueOf(i)); - return this; - } - - @Override - public MetricsRecordBuilder addCounter(MetricsInfo metricsInfo, long l) { - counters.put(canonicalizeMetricName(metricsInfo.name()), Long.valueOf(l)); - return this; - } - - @Override - public MetricsRecordBuilder addGauge(MetricsInfo metricsInfo, int i) { - gauges.put(canonicalizeMetricName(metricsInfo.name()), Long.valueOf(i)); - return this; - } - - @Override - public MetricsRecordBuilder addGauge(MetricsInfo metricsInfo, long l) { - gauges.put(canonicalizeMetricName(metricsInfo.name()), Long.valueOf(l)); - return this; - } - - @Override - public MetricsRecordBuilder addGauge(MetricsInfo metricsInfo, float v) { - gauges.put(canonicalizeMetricName(metricsInfo.name()), Double.valueOf(v)); - return this; - } - - @Override - public MetricsRecordBuilder addGauge(MetricsInfo metricsInfo, double v) { - gauges.put(canonicalizeMetricName(metricsInfo.name()), Double.valueOf(v)); - return this; - } - - @Override - public MetricsCollector parent() { - return mockMetricsBuilder; - } - } - - @Override - public void init() { - // Make sure that the metrics system doesn't throw an exception when - // registering a source with the same name - DefaultMetricsSystem.setMiniClusterMode(true); - } - - @Override - public void assertTag(String name, String expected, BaseSource source) { - getMetrics(source); - String cName = canonicalizeMetricName(name); - assertEquals("Tags should be equal", expected, tags.get(cName)); - } - - @Override - public void assertGauge(String name, long expected, BaseSource source) { - long found = getGaugeLong(name, source); - assertEquals("Metrics Should be equal", (long) Long.valueOf(expected), found); - } - - @Override - public void assertGaugeGt(String name, long expected, BaseSource source) { - double found = getGaugeDouble(name, source); - assertTrue(name + " (" + found + ") should be greater than " + expected, found > expected); - } - - @Override - public void assertGaugeLt(String name, long expected, BaseSource source) { - double found = getGaugeDouble(name,
source); - assertTrue(name + "(" + found + ") should be less than " + expected, found < expected); - } - - @Override - public void assertGauge(String name, double expected, BaseSource source) { - double found = getGaugeDouble(name, source); - assertEquals("Metrics Should be equal", (double) Double.valueOf(expected), found, 0.01); - } - - @Override - public void assertGaugeGt(String name, double expected, BaseSource source) { - double found = getGaugeDouble(name, source); - assertTrue(name + "(" + found + ") should be greater than " + expected, found > expected); - } - - @Override - public void assertGaugeLt(String name, double expected, BaseSource source) { - double found = getGaugeDouble(name, source); - assertTrue(name + "(" + found + ") should be less than " + expected, found < expected); - } - - @Override - public void assertCounter(String name, long expected, BaseSource source) { - long found = getCounter(name, source); - assertEquals("Metrics Counters should be equal", (long) Long.valueOf(expected), found); - } - - @Override - public void assertCounterGt(String name, long expected, BaseSource source) { - long found = getCounter(name, source); - assertTrue(name + " (" + found + ") should be greater than " + expected, found > expected); - } - - @Override - public void assertCounterLt(String name, long expected, BaseSource source) { - long found = getCounter(name, source); - assertTrue(name + "(" + found + ") should be less than " + expected, found < expected); - } - - @Override - public long getCounter(String name, BaseSource source) { - getMetrics(source); - String cName = canonicalizeMetricName(name); - assertNotNull("Should get counter "+cName + " but did not",counters.get(cName)); - return counters.get(cName).longValue(); - } - - @Override - public boolean checkCounterExists(String name, BaseSource source) { - getMetrics(source); - String cName = canonicalizeMetricName(name); - return (counters.get(cName) != null) ? true : false; - } - - @Override - public double getGaugeDouble(String name, BaseSource source) { - getMetrics(source); - String cName = canonicalizeMetricName(name); - assertNotNull("Should get gauge "+cName + " but did not",gauges.get(cName)); - return gauges.get(cName).doubleValue(); - } - - @Override - public long getGaugeLong(String name, BaseSource source) { - getMetrics(source); - String cName = canonicalizeMetricName(name); - assertNotNull("Should get gauge " + cName + " but did not", gauges.get(cName)); - return gauges.get(cName).longValue(); - } - - private void reset() { - tags.clear(); - gauges.clear(); - counters.clear(); - } - - private void getMetrics(BaseSource source) { - reset(); - if (!(source instanceof MetricsSource)) { - assertTrue("The Source passed must be a MetricsSource", false); - } - MetricsSource impl = (MetricsSource) source; - - impl.getMetrics(new MockMetricsBuilder(), true); - - } - - private String canonicalizeMetricName(String in) { - return in.toLowerCase().replaceAll("[^A-Za-z0-9 ]", ""); - } -} diff --git a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/thrift/TestMetricsThriftServerSourceFactoryImpl.java b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/thrift/TestMetricsThriftServerSourceFactoryImpl.java deleted file mode 100644 index c3a32b9..0000000 --- a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/thrift/TestMetricsThriftServerSourceFactoryImpl.java +++ /dev/null @@ -1,59 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
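MetricsAssertHelperImpl, deleted above, works by handing the source a mock MetricsCollector and caching whatever tags, gauges, and counters it reports. A hedged sketch of how a test would drive it, using only methods visible in the removed implementation; the metric name "requests" is illustrative, not one any particular source is guaranteed to publish:

import org.apache.hadoop.hbase.metrics.BaseSource;
import org.apache.hadoop.hbase.test.MetricsAssertHelper;

public class MetricsAssertUsageSketch {
  public static void checkRequestCounter(MetricsAssertHelper helper, BaseSource source) {
    // init() flips DefaultMetricsSystem into mini-cluster mode so that
    // re-registering a source with the same name does not throw (see the impl above).
    helper.init();
    if (helper.checkCounterExists("requests", source)) {
      long requests = helper.getCounter("requests", source);
      // every assert*/get* call re-snapshots the source through the mock collector
      helper.assertCounterGt("requests", requests - 1, source);
    }
  }
}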
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.thrift; - -import org.apache.hadoop.hbase.CompatibilitySingletonFactory; -import org.apache.hadoop.hbase.testclassification.SmallTests; -import org.apache.hadoop.hbase.testclassification.MetricsTests; -import org.apache.hadoop.hbase.thrift.MetricsThriftServerSourceFactory; -import org.apache.hadoop.hbase.thrift.MetricsThriftServerSourceFactoryImpl; -import org.junit.Test; -import org.junit.experimental.categories.Category; - -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertSame; -import static org.junit.Assert.assertTrue; - -/** - * Test for hadoop 2's version of MetricsThriftServerSourceFactory - */ -@Category({MetricsTests.class, SmallTests.class}) -public class TestMetricsThriftServerSourceFactoryImpl { - - @Test - public void testCompatabilityRegistered() throws Exception { - assertNotNull(CompatibilitySingletonFactory.getInstance(MetricsThriftServerSourceFactory.class)); - assertTrue(CompatibilitySingletonFactory.getInstance(MetricsThriftServerSourceFactory.class) instanceof MetricsThriftServerSourceFactoryImpl); - } - - @Test - public void testCreateThriftOneSource() throws Exception { - //Make sure that the factory gives back a singleton. - assertSame(new MetricsThriftServerSourceFactoryImpl().createThriftOneSource(), - new MetricsThriftServerSourceFactoryImpl().createThriftOneSource()); - - } - - @Test - public void testCreateThriftTwoSource() throws Exception { - //Make sure that the factory gives back a singleton. - assertSame(new MetricsThriftServerSourceFactoryImpl().createThriftTwoSource(), - new MetricsThriftServerSourceFactoryImpl().createThriftTwoSource()); - } -} diff --git a/hbase-hadoop2-compat/src/test/resources/META-INF/services/org.apache.hadoop.hbase.HadoopShims b/hbase-hadoop2-compat/src/test/resources/META-INF/services/org.apache.hadoop.hbase.HadoopShims deleted file mode 100644 index ab77800..0000000 --- a/hbase-hadoop2-compat/src/test/resources/META-INF/services/org.apache.hadoop.hbase.HadoopShims +++ /dev/null @@ -1,18 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. 
See the License for the -# specific language governing permissions and limitations -# under the License. -# -org.apache.hadoop.hbase.HadoopShimsImpl \ No newline at end of file diff --git a/hbase-hadoop2-compat/src/test/resources/META-INF/services/org.apache.hadoop.hbase.test.MetricsAssertHelper b/hbase-hadoop2-compat/src/test/resources/META-INF/services/org.apache.hadoop.hbase.test.MetricsAssertHelper deleted file mode 100644 index c40a7e9..0000000 --- a/hbase-hadoop2-compat/src/test/resources/META-INF/services/org.apache.hadoop.hbase.test.MetricsAssertHelper +++ /dev/null @@ -1,18 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# -org.apache.hadoop.hbase.test.MetricsAssertHelperImpl diff --git a/hbase-it/pom.xml b/hbase-it/pom.xml index 92243f0..c169823 100644 --- a/hbase-it/pom.xml +++ b/hbase-it/pom.xml @@ -213,15 +213,6 @@ org.apache.hbase - hbase-hadoop-compat - - - org.apache.hbase - ${compat.module} - ${project.version} - - - org.apache.hbase hbase-testing-util diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestTableMapReduceUtil.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestTableMapReduceUtil.java index e21dfec..c6b9725 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestTableMapReduceUtil.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestTableMapReduceUtil.java @@ -70,7 +70,6 @@ public class IntegrationTestTableMapReduceUtil implements Configurable, Tool { assertTrue(tmpjars.contains("hbase-common")); assertTrue(tmpjars.contains("hbase-protocol")); assertTrue(tmpjars.contains("hbase-client")); - assertTrue(tmpjars.contains("hbase-hadoop-compat")); assertTrue(tmpjars.contains("hbase-server")); // verify presence of 3rd party dependencies. 
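The assertion removed from IntegrationTestTableMapReduceUtil checked that the compat jar was staged for MapReduce via the job's tmpjars list, the comma-separated configuration value TableMapReduceUtil populates when it ships dependency jars. A minimal sketch of that check (the helper name is illustrative):

import org.apache.hadoop.conf.Configuration;

public class TmpJarsCheckSketch {
  public static boolean shipsJar(Configuration conf, String jarNameFragment) {
    // addDependencyJars records the staged jar paths under the "tmpjars" key,
    // so a substring match on the jar name is enough for the test's purposes.
    String tmpjars = conf.get("tmpjars", "");
    return tmpjars.contains(jarNameFragment);
  }
}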
diff --git a/hbase-prefix-tree/pom.xml b/hbase-prefix-tree/pom.xml index 03b4000..7e02344 100644 --- a/hbase-prefix-tree/pom.xml +++ b/hbase-prefix-tree/pom.xml @@ -121,15 +121,6 @@ hbase-common - org.apache.hbase - hbase-hadoop-compat - - - org.apache.hbase - ${compat.module} - ${project.version} - - com.google.guava guava diff --git a/hbase-rest/pom.xml b/hbase-rest/pom.xml index 3443afc..b14d678 100644 --- a/hbase-rest/pom.xml +++ b/hbase-rest/pom.xml @@ -206,15 +206,6 @@ org.apache.hbase - hbase-hadoop-compat - - - org.apache.hbase - ${compat.module} - ${project.version} - - - org.apache.hbase hbase-server test-jar test diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MetricsREST.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MetricsREST.java deleted file mode 100644 index e31037a..0000000 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MetricsREST.java +++ /dev/null @@ -1,103 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.rest; - -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.CompatibilitySingletonFactory; - -import org.apache.hadoop.hbase.rest.MetricsRESTSource; - -@InterfaceAudience.Private -public class MetricsREST { - - public MetricsRESTSource getSource() { - return source; - } - - private MetricsRESTSource source; - - public MetricsREST() { - source = CompatibilitySingletonFactory.getInstance(MetricsRESTSource.class); - } - - /** - * @param inc How much to add to requests. - */ - public void incrementRequests(final int inc) { - source.incrementRequests(inc); - } - - /** - * @param inc How much to add to sucessfulGetCount. - */ - public void incrementSucessfulGetRequests(final int inc) { - source.incrementSucessfulGetRequests(inc); - } - - /** - * @param inc How much to add to sucessfulPutCount. - */ - public void incrementSucessfulPutRequests(final int inc) { - source.incrementSucessfulPutRequests(inc); - } - - /** - * @param inc How much to add to failedPutCount. - */ - public void incrementFailedPutRequests(final int inc) { - source.incrementFailedPutRequests(inc); - } - - /** - * @param inc How much to add to failedGetCount. - */ - public void incrementFailedGetRequests(final int inc) { - source.incrementFailedGetRequests(inc); - } - - /** - * @param inc How much to add to sucessfulDeleteCount. - */ - public void incrementSucessfulDeleteRequests(final int inc) { - source.incrementSucessfulDeleteRequests(inc); - } - - /** - * @param inc How much to add to failedDeleteCount. - */ - public void incrementFailedDeleteRequests(final int inc) { - source.incrementFailedDeleteRequests(inc); - } - - /** - * @param inc How much to add to sucessfulScanCount. 
- */ - public synchronized void incrementSucessfulScanRequests(final int inc) { - source.incrementSucessfulScanRequests(inc); - } - - /** - * @param inc How much to add to failedScanCount. - */ - public void incrementFailedScanRequests(final int inc) { - source.incrementFailedScanRequests(inc); - } - -} diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResource.java index c88ac91..c389af2 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResource.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResource.java @@ -65,7 +65,6 @@ public class MultiRowResource extends ResourceBase implements Constants { public Response get(final @Context UriInfo uriInfo) { MultivaluedMap params = uriInfo.getQueryParameters(); - servlet.getMetrics().incrementRequests(1); try { CellSetModel model = new CellSetModel(); for (String rk : params.get(ROW_KEYS_PARAM_NAME)) { @@ -92,16 +91,13 @@ public class MultiRowResource extends ResourceBase implements Constants { if (model.getRows().size() == 0) { //If no rows found. - servlet.getMetrics().incrementFailedGetRequests(1); return Response.status(Response.Status.NOT_FOUND) .type(MIMETYPE_TEXT).entity("No rows found." + CRLF) .build(); } else { - servlet.getMetrics().incrementSucessfulGetRequests(1); return Response.ok(model).build(); } } catch (Exception e) { - servlet.getMetrics().incrementFailedGetRequests(1); return processException(e); } } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/NamespacesInstanceResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/NamespacesInstanceResource.java index 8f64738..6c25a3d 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/NamespacesInstanceResource.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/NamespacesInstanceResource.java @@ -94,7 +94,6 @@ public class NamespacesInstanceResource extends ResourceBase { if (LOG.isDebugEnabled()) { LOG.debug("GET " + uriInfo.getAbsolutePath()); } - servlet.getMetrics().incrementRequests(1); // Respond to list of namespace tables requests. 
if(queryTables){ @@ -105,10 +104,8 @@ public class NamespacesInstanceResource extends ResourceBase { tableModel.add(new TableModel(tables[i].getTableName().getQualifierAsString())); } - servlet.getMetrics().incrementSucessfulGetRequests(1); return Response.ok(tableModel).build(); }catch(IOException e) { - servlet.getMetrics().incrementFailedGetRequests(1); throw new RuntimeException("Cannot retrieve table list for '" + namespace + "'."); } } @@ -117,10 +114,8 @@ public class NamespacesInstanceResource extends ResourceBase { try { NamespacesInstanceModel rowModel = new NamespacesInstanceModel(servlet.getAdmin(), namespace); - servlet.getMetrics().incrementSucessfulGetRequests(1); return Response.ok(rowModel).build(); } catch (IOException e) { - servlet.getMetrics().incrementFailedGetRequests(1); throw new RuntimeException("Cannot retrieve info for '" + namespace + "'."); } } @@ -138,7 +133,6 @@ public class NamespacesInstanceResource extends ResourceBase { if (LOG.isDebugEnabled()) { LOG.debug("PUT " + uriInfo.getAbsolutePath()); } - servlet.getMetrics().incrementRequests(1); return processUpdate(model, true, uriInfo); } @@ -154,12 +148,10 @@ public class NamespacesInstanceResource extends ResourceBase { if (LOG.isDebugEnabled()) { LOG.debug("PUT " + uriInfo.getAbsolutePath()); } - servlet.getMetrics().incrementRequests(1); try{ NamespacesInstanceModel model = new NamespacesInstanceModel(namespace); return processUpdate(model, true, uriInfo); - }catch(IOException ioe){ - servlet.getMetrics().incrementFailedPutRequests(1); + } catch(IOException ioe){ throw new RuntimeException("Cannot retrieve info for '" + namespace + "'."); } } @@ -179,7 +171,6 @@ public class NamespacesInstanceResource extends ResourceBase { if (LOG.isDebugEnabled()) { LOG.debug("POST " + uriInfo.getAbsolutePath()); } - servlet.getMetrics().incrementRequests(1); return processUpdate(model, false, uriInfo); } @@ -195,12 +186,10 @@ public class NamespacesInstanceResource extends ResourceBase { if (LOG.isDebugEnabled()) { LOG.debug("POST " + uriInfo.getAbsolutePath()); } - servlet.getMetrics().incrementRequests(1); try{ NamespacesInstanceModel model = new NamespacesInstanceModel(namespace); return processUpdate(model, false, uriInfo); }catch(IOException ioe){ - servlet.getMetrics().incrementFailedPutRequests(1); throw new RuntimeException("Cannot retrieve info for '" + namespace + "'."); } } @@ -209,7 +198,6 @@ public class NamespacesInstanceResource extends ResourceBase { private Response processUpdate(final NamespacesInstanceModel model, final boolean updateExisting, final UriInfo uriInfo) { if (servlet.isReadOnly()) { - servlet.getMetrics().incrementFailedPutRequests(1); return Response.status(Response.Status.FORBIDDEN).type(MIMETYPE_TEXT) .entity("Forbidden" + CRLF).build(); } @@ -220,13 +208,11 @@ public class NamespacesInstanceResource extends ResourceBase { admin = servlet.getAdmin(); namespaceExists = doesNamespaceExist(admin, namespace); }catch (IOException e) { - servlet.getMetrics().incrementFailedPutRequests(1); return processException(e); } // Do not allow creation if namespace already exists. if(!updateExisting && namespaceExists){ - servlet.getMetrics().incrementFailedPutRequests(1); return Response.status(Response.Status.FORBIDDEN).type(MIMETYPE_TEXT). entity("Namespace '" + namespace + "' already exists. Use REST PUT " + "to alter the existing namespace.").build(); @@ -234,7 +220,6 @@ public class NamespacesInstanceResource extends ResourceBase { // Do not allow altering if namespace does not exist. 
if (updateExisting && !namespaceExists){ - servlet.getMetrics().incrementFailedPutRequests(1); return Response.status(Response.Status.FORBIDDEN).type(MIMETYPE_TEXT). entity("Namespace '" + namespace + "' does not exist. Use " + "REST POST to create the namespace.").build(); @@ -253,18 +238,16 @@ public class NamespacesInstanceResource extends ResourceBase { } NamespaceDescriptor nsd = builder.build(); - try{ + try { if(updateExisting){ admin.modifyNamespace(nsd); }else{ admin.createNamespace(nsd); } - }catch (IOException e) { - servlet.getMetrics().incrementFailedPutRequests(1); + } catch (IOException e) { return processException(e); } - servlet.getMetrics().incrementSucessfulPutRequests(1); return Response.created(uriInfo.getAbsolutePath()).build(); } @@ -291,7 +274,6 @@ public class NamespacesInstanceResource extends ResourceBase { LOG.debug("DELETE " + uriInfo.getAbsolutePath()); } if (servlet.isReadOnly()) { - servlet.getMetrics().incrementFailedDeleteRequests(1); return Response.status(Response.Status.FORBIDDEN).type(MIMETYPE_TEXT) .entity("Forbidden" + CRLF).build(); } @@ -305,11 +287,9 @@ public class NamespacesInstanceResource extends ResourceBase { } admin.deleteNamespace(namespace); - servlet.getMetrics().incrementSucessfulDeleteRequests(1); return Response.ok().build(); } catch (IOException e) { - servlet.getMetrics().incrementFailedDeleteRequests(1); return processException(e); } } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/NamespacesResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/NamespacesResource.java index 0548fe8..0e1fde4 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/NamespacesResource.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/NamespacesResource.java @@ -67,14 +67,11 @@ public class NamespacesResource extends ResourceBase { if (LOG.isDebugEnabled()) { LOG.debug("GET " + uriInfo.getAbsolutePath()); } - servlet.getMetrics().incrementRequests(1); try { NamespacesModel rowModel = null; rowModel = new NamespacesModel(servlet.getAdmin()); - servlet.getMetrics().incrementSucessfulGetRequests(1); return Response.ok(rowModel).build(); } catch (IOException e) { - servlet.getMetrics().incrementFailedGetRequests(1); throw new RuntimeException("Cannot retrieve list of namespaces."); } } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServlet.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServlet.java index 1f751a6..b8b309f 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServlet.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServlet.java @@ -39,7 +39,6 @@ public class RESTServlet implements Constants { private static final Logger LOG = Logger.getLogger(RESTServlet.class); private static RESTServlet INSTANCE; private final Configuration conf; - private final MetricsREST metrics = new MetricsREST(); private final ConnectionCache connectionCache; private final UserGroupInformation realUser; @@ -116,10 +115,6 @@ public class RESTServlet implements Constants { return conf; } - MetricsREST getMetrics() { - return metrics; - } - /** * Helper method to determine if server should * only respond to GET HTTP method requests. 
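The REST hunks above all strip the same instrumentation pattern: bump a request counter on entry, then a success or failure counter depending on how the handler exits. A self-contained sketch of that pattern, with plain AtomicLongs standing in for the removed MetricsRESTSource calls and a method shape that is illustrative rather than the JAX-RS resource signature:

import java.util.concurrent.atomic.AtomicLong;
import java.util.function.Supplier;

public class RestMetricsPatternSketch {
  private final AtomicLong requests = new AtomicLong();
  private final AtomicLong successfulGets = new AtomicLong();
  private final AtomicLong failedGets = new AtomicLong();

  public <R> R instrumentedGet(Supplier<R> handler) {
    // counted on entry, like servlet.getMetrics().incrementRequests(1)
    requests.incrementAndGet();
    try {
      R response = handler.get();
      successfulGets.incrementAndGet();
      return response;
    } catch (RuntimeException e) {
      // mirrors incrementFailedGetRequests(1) before processException(e)
      failedGets.incrementAndGet();
      throw e;
    }
  }
}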
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RegionsResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RegionsResource.java index 48721bb..25956ba 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RegionsResource.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RegionsResource.java @@ -74,7 +74,6 @@ public class RegionsResource extends ResourceBase { if (LOG.isDebugEnabled()) { LOG.debug("GET " + uriInfo.getAbsolutePath()); } - servlet.getMetrics().incrementRequests(1); try { TableName tableName = TableName.valueOf(tableResource.getName()); TableInfoModel model = new TableInfoModel(tableName.getNameAsString()); @@ -93,15 +92,12 @@ public class RegionsResource extends ResourceBase { } ResponseBuilder response = Response.ok(model); response.cacheControl(cacheControl); - servlet.getMetrics().incrementSucessfulGetRequests(1); return response.build(); } catch (TableNotFoundException e) { - servlet.getMetrics().incrementFailedGetRequests(1); return Response.status(Response.Status.NOT_FOUND) .type(MIMETYPE_TEXT).entity("Not found" + CRLF) .build(); } catch (IOException e) { - servlet.getMetrics().incrementFailedGetRequests(1); return Response.status(Response.Status.SERVICE_UNAVAILABLE) .type(MIMETYPE_TEXT).entity("Unavailable" + CRLF) .build(); diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RootResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RootResource.java index c08bb8b..5bc338d 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RootResource.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RootResource.java @@ -75,14 +75,11 @@ public class RootResource extends ResourceBase { if (LOG.isDebugEnabled()) { LOG.debug("GET " + uriInfo.getAbsolutePath()); } - servlet.getMetrics().incrementRequests(1); try { ResponseBuilder response = Response.ok(getTableList()); response.cacheControl(cacheControl); - servlet.getMetrics().incrementSucessfulGetRequests(1); return response.build(); } catch (Exception e) { - servlet.getMetrics().incrementFailedGetRequests(1); return processException(e); } } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java index f922343..7ac2eb7 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java @@ -88,14 +88,12 @@ public class RowResource extends ResourceBase { if (LOG.isDebugEnabled()) { LOG.debug("GET " + uriInfo.getAbsolutePath()); } - servlet.getMetrics().incrementRequests(1); MultivaluedMap params = uriInfo.getQueryParameters(); try { ResultGenerator generator = ResultGenerator.fromRowSpec(tableResource.getName(), rowspec, null, !params.containsKey(NOCACHE_PARAM_NAME)); if (!generator.hasNext()) { - servlet.getMetrics().incrementFailedGetRequests(1); return Response.status(Response.Status.NOT_FOUND) .type(MIMETYPE_TEXT).entity("Not found" + CRLF) .build(); @@ -119,10 +117,8 @@ public class RowResource extends ResourceBase { value = generator.next(); } while (value != null); model.addRow(rowModel); - servlet.getMetrics().incrementSucessfulGetRequests(1); return Response.ok(model).build(); } catch (Exception e) { - servlet.getMetrics().incrementFailedPutRequests(1); return processException(e); } } @@ -133,11 +129,9 @@ public class RowResource extends ResourceBase { if (LOG.isDebugEnabled()) { LOG.debug("GET " + 
uriInfo.getAbsolutePath() + " as "+ MIMETYPE_BINARY); } - servlet.getMetrics().incrementRequests(1); // doesn't make sense to use a non specific coordinate as this can only // return a single cell if (!rowspec.hasColumns() || rowspec.getColumns().length > 1) { - servlet.getMetrics().incrementFailedGetRequests(1); return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) .entity("Bad request: Either 0 or more than 1 columns specified." + CRLF).build(); } @@ -147,7 +141,6 @@ public class RowResource extends ResourceBase { ResultGenerator.fromRowSpec(tableResource.getName(), rowspec, null, !params.containsKey(NOCACHE_PARAM_NAME)); if (!generator.hasNext()) { - servlet.getMetrics().incrementFailedGetRequests(1); return Response.status(Response.Status.NOT_FOUND) .type(MIMETYPE_TEXT).entity("Not found" + CRLF) .build(); @@ -155,18 +148,14 @@ public class RowResource extends ResourceBase { Cell value = generator.next(); ResponseBuilder response = Response.ok(CellUtil.cloneValue(value)); response.header("X-Timestamp", value.getTimestamp()); - servlet.getMetrics().incrementSucessfulGetRequests(1); return response.build(); } catch (Exception e) { - servlet.getMetrics().incrementFailedGetRequests(1); return processException(e); } } Response update(final CellSetModel model, final boolean replace) { - servlet.getMetrics().incrementRequests(1); if (servlet.isReadOnly()) { - servlet.getMetrics().incrementFailedPutRequests(1); return Response.status(Response.Status.FORBIDDEN) .type(MIMETYPE_TEXT).entity("Forbidden" + CRLF) .build(); @@ -192,7 +181,6 @@ public class RowResource extends ResourceBase { key = rowspec.getRow(); } if (key == null) { - servlet.getMetrics().incrementFailedPutRequests(1); return Response.status(Response.Status.BAD_REQUEST) .type(MIMETYPE_TEXT).entity("Bad request: Row key not specified." + CRLF) .build(); @@ -207,7 +195,6 @@ public class RowResource extends ResourceBase { col = null; } if (col == null) { - servlet.getMetrics().incrementFailedPutRequests(1); return Response.status(Response.Status.BAD_REQUEST) .type(MIMETYPE_TEXT).entity("Bad request: Column found to be null." + CRLF) .build(); @@ -228,10 +215,8 @@ public class RowResource extends ResourceBase { table = servlet.getTable(tableResource.getName()); table.put(puts); ResponseBuilder response = Response.ok(); - servlet.getMetrics().incrementSucessfulPutRequests(1); return response.build(); } catch (Exception e) { - servlet.getMetrics().incrementFailedPutRequests(1); return processException(e); } finally { if (table != null) try { @@ -245,9 +230,7 @@ public class RowResource extends ResourceBase { // This currently supports only update of one row at a time. Response updateBinary(final byte[] message, final HttpHeaders headers, final boolean replace) { - servlet.getMetrics().incrementRequests(1); if (servlet.isReadOnly()) { - servlet.getMetrics().incrementFailedPutRequests(1); return Response.status(Response.Status.FORBIDDEN) .type(MIMETYPE_TEXT).entity("Forbidden" + CRLF) .build(); @@ -274,7 +257,6 @@ public class RowResource extends ResourceBase { timestamp = Long.parseLong(vals.get(0)); } if (column == null) { - servlet.getMetrics().incrementFailedPutRequests(1); return Response.status(Response.Status.BAD_REQUEST) .type(MIMETYPE_TEXT).entity("Bad request: Column found to be null." 
+ CRLF) .build(); @@ -292,10 +274,8 @@ public class RowResource extends ResourceBase { if (LOG.isDebugEnabled()) { LOG.debug("PUT " + put.toString()); } - servlet.getMetrics().incrementSucessfulPutRequests(1); return Response.ok().build(); } catch (Exception e) { - servlet.getMetrics().incrementFailedPutRequests(1); return processException(e); } finally { if (table != null) try { @@ -355,9 +335,7 @@ public class RowResource extends ResourceBase { if (LOG.isDebugEnabled()) { LOG.debug("DELETE " + uriInfo.getAbsolutePath()); } - servlet.getMetrics().incrementRequests(1); if (servlet.isReadOnly()) { - servlet.getMetrics().incrementFailedDeleteRequests(1); return Response.status(Response.Status.FORBIDDEN) .type(MIMETYPE_TEXT).entity("Forbidden" + CRLF) .build(); @@ -396,12 +374,10 @@ public class RowResource extends ResourceBase { try { table = servlet.getTable(tableResource.getName()); table.delete(delete); - servlet.getMetrics().incrementSucessfulDeleteRequests(1); if (LOG.isDebugEnabled()) { LOG.debug("DELETE " + delete.toString()); } } catch (Exception e) { - servlet.getMetrics().incrementFailedDeleteRequests(1); return processException(e); } finally { if (table != null) try { @@ -425,7 +401,6 @@ public class RowResource extends ResourceBase { try { table = servlet.getTable(tableResource.getName()); if (model.getRows().size() != 1) { - servlet.getMetrics().incrementFailedPutRequests(1); return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) .entity("Bad request: Number of rows specified is not 1." + CRLF).build(); } @@ -439,7 +414,6 @@ public class RowResource extends ResourceBase { List cellModels = rowModel.getCells(); int cellModelCount = cellModels.size(); if (key == null || cellModelCount <= 1) { - servlet.getMetrics().incrementFailedPutRequests(1); return Response .status(Response.Status.BAD_REQUEST) .type(MIMETYPE_TEXT) @@ -463,7 +437,6 @@ public class RowResource extends ResourceBase { } } if (valueToPutCell == null) { - servlet.getMetrics().incrementFailedPutRequests(1); return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) .entity("Bad request: The column to put and check do not match." + CRLF).build(); } else { @@ -473,7 +446,6 @@ public class RowResource extends ResourceBase { valueToCheckCell.getValue(), put); } } else { - servlet.getMetrics().incrementFailedPutRequests(1); return Response.status(Response.Status.BAD_REQUEST) .type(MIMETYPE_TEXT).entity("Bad request: Column incorrectly specified." 
+ CRLF) .build(); @@ -483,16 +455,13 @@ public class RowResource extends ResourceBase { LOG.debug("CHECK-AND-PUT " + put.toString() + ", returns " + retValue); } if (!retValue) { - servlet.getMetrics().incrementFailedPutRequests(1); return Response.status(Response.Status.NOT_MODIFIED) .type(MIMETYPE_TEXT).entity("Value not Modified" + CRLF) .build(); } ResponseBuilder response = Response.ok(); - servlet.getMetrics().incrementSucessfulPutRequests(1); return response.build(); } catch (Exception e) { - servlet.getMetrics().incrementFailedPutRequests(1); return processException(e); } finally { if (table != null) try { @@ -516,7 +485,6 @@ public class RowResource extends ResourceBase { try { table = servlet.getTable(tableResource.getName()); if (model.getRows().size() != 1) { - servlet.getMetrics().incrementFailedDeleteRequests(1); return Response.status(Response.Status.BAD_REQUEST) .type(MIMETYPE_TEXT).entity("Bad request" + CRLF) .build(); @@ -527,7 +495,6 @@ public class RowResource extends ResourceBase { key = rowspec.getRow(); } if (key == null) { - servlet.getMetrics().incrementFailedDeleteRequests(1); return Response.status(Response.Status.BAD_REQUEST) .type(MIMETYPE_TEXT).entity("Bad request: Row key found to be null." + CRLF) .build(); @@ -541,7 +508,6 @@ public class RowResource extends ResourceBase { try { valueToDeleteColumn = rowspec.getColumns()[0]; } catch (final ArrayIndexOutOfBoundsException e) { - servlet.getMetrics().incrementFailedDeleteRequests(1); return Response.status(Response.Status.BAD_REQUEST) .type(MIMETYPE_TEXT).entity("Bad request: Column not specified for check." + CRLF) .build(); @@ -560,7 +526,6 @@ public class RowResource extends ResourceBase { valueToDeleteCell.getValue(), delete); } } else { - servlet.getMetrics().incrementFailedDeleteRequests(1); return Response.status(Response.Status.BAD_REQUEST) .type(MIMETYPE_TEXT).entity("Bad request: Column incorrectly specified." + CRLF) .build(); @@ -573,16 +538,13 @@ public class RowResource extends ResourceBase { } if (!retValue) { - servlet.getMetrics().incrementFailedDeleteRequests(1); return Response.status(Response.Status.NOT_MODIFIED) .type(MIMETYPE_TEXT).entity(" Delete check failed." 
+ CRLF) .build(); } ResponseBuilder response = Response.ok(); - servlet.getMetrics().incrementSucessfulDeleteRequests(1); return response.build(); } catch (Exception e) { - servlet.getMetrics().incrementFailedDeleteRequests(1); return processException(e); } finally { if (table != null) try { diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerInstanceResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerInstanceResource.java index ffb2fae..ce18611 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerInstanceResource.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerInstanceResource.java @@ -77,9 +77,7 @@ public class ScannerInstanceResource extends ResourceBase { if (LOG.isDebugEnabled()) { LOG.debug("GET " + uriInfo.getAbsolutePath()); } - servlet.getMetrics().incrementRequests(1); if (generator == null) { - servlet.getMetrics().incrementFailedGetRequests(1); return Response.status(Response.Status.NOT_FOUND) .type(MIMETYPE_TEXT).entity("Not found" + CRLF) .build(); @@ -97,12 +95,7 @@ public class ScannerInstanceResource extends ResourceBase { try { value = generator.next(); } catch (IllegalStateException e) { - if (ScannerResource.delete(id)) { - servlet.getMetrics().incrementSucessfulDeleteRequests(1); - } else { - servlet.getMetrics().incrementFailedDeleteRequests(1); - } - servlet.getMetrics().incrementFailedGetRequests(1); + ScannerResource.delete(id); return Response.status(Response.Status.GONE) .type(MIMETYPE_TEXT).entity("Gone" + CRLF) .build(); @@ -140,7 +133,6 @@ public class ScannerInstanceResource extends ResourceBase { model.addRow(rowModel); ResponseBuilder response = Response.ok(model); response.cacheControl(cacheControl); - servlet.getMetrics().incrementSucessfulGetRequests(1); return response.build(); } @@ -151,7 +143,6 @@ public class ScannerInstanceResource extends ResourceBase { LOG.debug("GET " + uriInfo.getAbsolutePath() + " as " + MIMETYPE_BINARY); } - servlet.getMetrics().incrementRequests(1); try { Cell value = generator.next(); if (value == null) { @@ -165,15 +156,9 @@ public class ScannerInstanceResource extends ResourceBase { Base64.encodeBytes( KeyValue.makeColumn(CellUtil.cloneFamily(value), CellUtil.cloneQualifier(value)))); response.header("X-Timestamp", value.getTimestamp()); - servlet.getMetrics().incrementSucessfulGetRequests(1); return response.build(); } catch (IllegalStateException e) { - if (ScannerResource.delete(id)) { - servlet.getMetrics().incrementSucessfulDeleteRequests(1); - } else { - servlet.getMetrics().incrementFailedDeleteRequests(1); - } - servlet.getMetrics().incrementFailedGetRequests(1); + ScannerResource.delete(id); return Response.status(Response.Status.GONE) .type(MIMETYPE_TEXT).entity("Gone" + CRLF) .build(); @@ -185,17 +170,12 @@ public class ScannerInstanceResource extends ResourceBase { if (LOG.isDebugEnabled()) { LOG.debug("DELETE " + uriInfo.getAbsolutePath()); } - servlet.getMetrics().incrementRequests(1); if (servlet.isReadOnly()) { return Response.status(Response.Status.FORBIDDEN) .type(MIMETYPE_TEXT).entity("Forbidden" + CRLF) .build(); } - if (ScannerResource.delete(id)) { - servlet.getMetrics().incrementSucessfulDeleteRequests(1); - } else { - servlet.getMetrics().incrementFailedDeleteRequests(1); - } + ScannerResource.delete(id); return Response.ok().build(); } } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerResource.java index 
6c424ce..4c9391d 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerResource.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerResource.java @@ -76,7 +76,6 @@ public class ScannerResource extends ResourceBase { Response update(final ScannerModel model, final boolean replace, final UriInfo uriInfo) { - servlet.getMetrics().incrementRequests(1); if (servlet.isReadOnly()) { return Response.status(Response.Status.FORBIDDEN) .type(MIMETYPE_TEXT).entity("Forbidden" + CRLF) @@ -108,10 +107,8 @@ public class ScannerResource extends ResourceBase { } UriBuilder builder = uriInfo.getAbsolutePathBuilder(); URI uri = builder.path(id).build(); - servlet.getMetrics().incrementSucessfulPutRequests(1); return Response.created(uri).build(); } catch (Exception e) { - servlet.getMetrics().incrementFailedPutRequests(1); if (e instanceof TableNotFoundException) { return Response.status(Response.Status.NOT_FOUND) .type(MIMETYPE_TEXT).entity("Not found" + CRLF) @@ -154,10 +151,7 @@ public class ScannerResource extends ResourceBase { final @PathParam("scanner") String id) throws IOException { ScannerInstanceResource instance = scanners.get(id); if (instance == null) { - servlet.getMetrics().incrementFailedGetRequests(1); return new ScannerInstanceResource(); - } else { - servlet.getMetrics().incrementSucessfulGetRequests(1); } return instance; } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/SchemaResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/SchemaResource.java index c0e7153..3cf98bd 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/SchemaResource.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/SchemaResource.java @@ -89,15 +89,12 @@ public class SchemaResource extends ResourceBase { if (LOG.isDebugEnabled()) { LOG.debug("GET " + uriInfo.getAbsolutePath()); } - servlet.getMetrics().incrementRequests(1); try { ResponseBuilder response = Response.ok(new TableSchemaModel(getTableSchema())); response.cacheControl(cacheControl); - servlet.getMetrics().incrementSucessfulGetRequests(1); return response.build(); } catch (Exception e) { - servlet.getMetrics().incrementFailedGetRequests(1); return processException(e); } } @@ -125,10 +122,8 @@ public class SchemaResource extends ResourceBase { admin.disableTable(name); admin.modifyTable(name, htd); admin.enableTable(name); - servlet.getMetrics().incrementSucessfulPutRequests(1); } else try { admin.createTable(htd); - servlet.getMetrics().incrementSucessfulPutRequests(1); } catch (TableExistsException e) { // race, someone else created a table with the same name return Response.status(Response.Status.NOT_MODIFIED) @@ -137,7 +132,6 @@ public class SchemaResource extends ResourceBase { } return Response.created(uriInfo.getAbsolutePath()).build(); } catch (Exception e) { - servlet.getMetrics().incrementFailedPutRequests(1); return processException(e); } } @@ -171,10 +165,8 @@ public class SchemaResource extends ResourceBase { } finally { admin.enableTable(TableName.valueOf(tableResource.getName())); } - servlet.getMetrics().incrementSucessfulPutRequests(1); return Response.ok().build(); } catch (Exception e) { - servlet.getMetrics().incrementFailedPutRequests(1); return processException(e); } } @@ -190,7 +182,6 @@ public class SchemaResource extends ResourceBase { return update(name, model, uriInfo, admin); } } catch (Exception e) { - servlet.getMetrics().incrementFailedPutRequests(1); return processException(e); } } @@ -203,7 +194,6 @@ public class 
SchemaResource extends ResourceBase { if (LOG.isDebugEnabled()) { LOG.debug("PUT " + uriInfo.getAbsolutePath()); } - servlet.getMetrics().incrementRequests(1); return update(model, true, uriInfo); } @@ -215,7 +205,6 @@ public class SchemaResource extends ResourceBase { if (LOG.isDebugEnabled()) { LOG.debug("PUT " + uriInfo.getAbsolutePath()); } - servlet.getMetrics().incrementRequests(1); return update(model, false, uriInfo); } @@ -226,7 +215,6 @@ public class SchemaResource extends ResourceBase { if (LOG.isDebugEnabled()) { LOG.debug("DELETE " + uriInfo.getAbsolutePath()); } - servlet.getMetrics().incrementRequests(1); if (servlet.isReadOnly()) { return Response.status(Response.Status.FORBIDDEN).type(MIMETYPE_TEXT) .entity("Forbidden" + CRLF).build(); @@ -237,10 +225,8 @@ public class SchemaResource extends ResourceBase { admin.disableTable(TableName.valueOf(tableResource.getName())); } catch (TableNotEnabledException e) { /* this is what we want anyway */ } admin.deleteTable(TableName.valueOf(tableResource.getName())); - servlet.getMetrics().incrementSucessfulDeleteRequests(1); return Response.ok().build(); } catch (Exception e) { - servlet.getMetrics().incrementFailedDeleteRequests(1); return processException(e); } } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterStatusResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterStatusResource.java index a7e52bd..aede8db 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterStatusResource.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterStatusResource.java @@ -66,7 +66,6 @@ public class StorageClusterStatusResource extends ResourceBase { if (LOG.isDebugEnabled()) { LOG.debug("GET " + uriInfo.getAbsolutePath()); } - servlet.getMetrics().incrementRequests(1); try { ClusterStatus status = servlet.getAdmin().getClusterStatus(); StorageClusterStatusModel model = new StorageClusterStatusModel(); @@ -97,10 +96,8 @@ public class StorageClusterStatusResource extends ResourceBase { } ResponseBuilder response = Response.ok(model); response.cacheControl(cacheControl); - servlet.getMetrics().incrementSucessfulGetRequests(1); return response.build(); } catch (IOException e) { - servlet.getMetrics().incrementFailedGetRequests(1); return Response.status(Response.Status.SERVICE_UNAVAILABLE) .type(MIMETYPE_TEXT).entity("Unavailable" + CRLF) .build(); diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterVersionResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterVersionResource.java index 85e81f8..397dcc1 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterVersionResource.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterVersionResource.java @@ -61,16 +61,13 @@ public class StorageClusterVersionResource extends ResourceBase { if (LOG.isDebugEnabled()) { LOG.debug("GET " + uriInfo.getAbsolutePath()); } - servlet.getMetrics().incrementRequests(1); try { StorageClusterVersionModel model = new StorageClusterVersionModel(); model.setVersion(servlet.getAdmin().getClusterStatus().getHBaseVersion()); ResponseBuilder response = Response.ok(model); response.cacheControl(cacheControl); - servlet.getMetrics().incrementSucessfulGetRequests(1); return response.build(); } catch (IOException e) { - servlet.getMetrics().incrementFailedGetRequests(1); return Response.status(Response.Status.SERVICE_UNAVAILABLE) 
.type(MIMETYPE_TEXT).entity("Unavailable" + CRLF) .build(); diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/TableResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/TableResource.java index f87ef7e..73217c4 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/TableResource.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/TableResource.java @@ -194,7 +194,6 @@ public class TableResource extends ResourceBase { tableScan.setCaching(fetchSize); return new TableScanResource(hTable.getScanner(tableScan), userRequestedLimit); } catch (Exception exp) { - servlet.getMetrics().incrementFailedScanRequests(1); processException(exp); LOG.warn(exp); return null; diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/TableScanResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/TableScanResource.java index 5cc2c7b..745b24c 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/TableScanResource.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/TableScanResource.java @@ -67,9 +67,7 @@ public class TableScanResource extends ResourceBase { @GET @Produces({ Constants.MIMETYPE_XML, Constants.MIMETYPE_JSON }) public CellSetModelStream get(final @Context UriInfo uriInfo) { - servlet.getMetrics().incrementRequests(1); final int rowsToSend = userRequestedLimit; - servlet.getMetrics().incrementSucessfulScanRequests(1); final Iterator itr = results.iterator(); return new CellSetModelStream(new ArrayList() { public Iterator iterator() { @@ -127,17 +125,14 @@ public class TableScanResource extends ResourceBase { @DefaultValue("0") @QueryParam(Constants.SCAN_START_TIME) long startTime, @DefaultValue(Long.MAX_VALUE + "") @QueryParam(Constants.SCAN_END_TIME) long endTime, @DefaultValue("true") @QueryParam(Constants.SCAN_BATCH_SIZE) boolean cacheBlocks) { - servlet.getMetrics().incrementRequests(1); try { int fetchSize = this.servlet.getConfiguration().getInt(Constants.SCAN_FETCH_SIZE, 10); ProtobufStreamingUtil stream = new ProtobufStreamingUtil(this.results, contentType, userRequestedLimit, fetchSize); - servlet.getMetrics().incrementSucessfulScanRequests(1); ResponseBuilder response = Response.ok(stream); response.header("content-type", contentType); return response.build(); } catch (Exception exp) { - servlet.getMetrics().incrementFailedScanRequests(1); processException(exp); LOG.warn(exp); return null; diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/VersionResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/VersionResource.java index ae93825..1f8a3d7 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/VersionResource.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/VersionResource.java @@ -78,10 +78,8 @@ public class VersionResource extends ResourceBase { if (LOG.isDebugEnabled()) { LOG.debug("GET " + uriInfo.getAbsolutePath()); } - servlet.getMetrics().incrementRequests(1); ResponseBuilder response = Response.ok(new VersionModel(context)); response.cacheControl(cacheControl); - servlet.getMetrics().incrementSucessfulGetRequests(1); return response.build(); } diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestGetAndPutResource.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestGetAndPutResource.java index a5326af..b0a3d13 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestGetAndPutResource.java +++ 
b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestGetAndPutResource.java @@ -29,14 +29,11 @@ import java.util.List; import javax.xml.bind.JAXBException; import org.apache.commons.httpclient.Header; -import org.apache.hadoop.hbase.CompatibilityFactory; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.rest.client.Response; import org.apache.hadoop.hbase.rest.model.CellModel; import org.apache.hadoop.hbase.rest.model.CellSetModel; import org.apache.hadoop.hbase.rest.model.RowModel; -import org.apache.hadoop.hbase.security.UserProvider; -import org.apache.hadoop.hbase.test.MetricsAssertHelper; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.RestTests; import org.apache.hadoop.hbase.util.Bytes; @@ -46,9 +43,6 @@ import org.junit.experimental.categories.Category; @Category({RestTests.class, MediumTests.class}) public class TestGetAndPutResource extends RowResourceBase { - private static final MetricsAssertHelper METRICS_ASSERT = - CompatibilityFactory.getInstance(MetricsAssertHelper.class); - @Test public void testForbidden() throws IOException, JAXBException { conf.set("hbase.rest.readonly", "true"); @@ -408,33 +402,6 @@ public class TestGetAndPutResource extends RowResourceBase { } @Test - public void testMetrics() throws IOException, JAXBException { - final String path = "/" + TABLE + "/" + ROW_4 + "/" + COLUMN_1; - Response response = client.put(path, Constants.MIMETYPE_BINARY, - Bytes.toBytes(VALUE_4)); - assertEquals(response.getCode(), 200); - Thread.yield(); - response = client.get(path, Constants.MIMETYPE_JSON); - assertEquals(response.getCode(), 200); - assertEquals(Constants.MIMETYPE_JSON, response.getHeader("content-type")); - response = deleteRow(TABLE, ROW_4); - assertEquals(response.getCode(), 200); - - UserProvider userProvider = UserProvider.instantiate(conf); - METRICS_ASSERT.assertCounterGt("requests", 2l, - RESTServlet.getInstance(conf, userProvider).getMetrics().getSource()); - - METRICS_ASSERT.assertCounterGt("successfulGet", 0l, - RESTServlet.getInstance(conf, userProvider).getMetrics().getSource()); - - METRICS_ASSERT.assertCounterGt("successfulPut", 0l, - RESTServlet.getInstance(conf, userProvider).getMetrics().getSource()); - - METRICS_ASSERT.assertCounterGt("successfulDelete", 0l, - RESTServlet.getInstance(conf, userProvider).getMetrics().getSource()); - } - - @Test public void testMultiColumnGetXML() throws Exception { String path = "/" + TABLE + "/fakerow"; CellSetModel cellSetModel = new CellSetModel(); diff --git a/hbase-server/pom.xml b/hbase-server/pom.xml index 26aad71..0446b6d 100644 --- a/hbase-server/pom.xml +++ b/hbase-server/pom.xml @@ -406,28 +406,6 @@ commons-collections commons-collections - - org.apache.hbase - hbase-hadoop-compat - - - org.apache.hbase - hbase-hadoop-compat - test-jar - test - - - org.apache.hbase - ${compat.module} - ${project.version} - - - org.apache.hbase - ${compat.module} - ${project.version} - test-jar - test - com.google.code.findbugs diff --git a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.jamon b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.jamon index f38cce9..9dc1c92 100644 --- a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.jamon +++ b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.jamon @@ -103,11 +103,6 @@ org.apache.hadoop.hbase.zookeeper.MasterAddressTracker;
-    Server Metrics
-    <& ServerMetricsTmpl; mWrap = regionServer.getRegionServerMetrics().getRegionServerWrapper(); &>
-
     <& ../common/TaskMonitorTmpl; filter = filter &>
diff --git a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RegionListTmpl.jamon b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RegionListTmpl.jamon index 7740c53..073b189 100644 --- a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RegionListTmpl.jamon +++ b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RegionListTmpl.jamon @@ -32,7 +32,6 @@ org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ServerInfo; org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad; org.apache.hadoop.hbase.client.RegionReplicaUtil; - org.apache.hadoop.hbase.regionserver.MetricsRegionWrapper; <%if (onlineRegions != null && onlineRegions.size() > 0) %> diff --git a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/ServerMetricsTmpl.jamon b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/ServerMetricsTmpl.jamon deleted file mode 100644 index 13ccc3b..0000000 --- a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/ServerMetricsTmpl.jamon +++ /dev/null @@ -1,199 +0,0 @@ -<%doc> - -Licensed to the Apache Software Foundation (ASF) under one -or more contributor license agreements. See the NOTICE file -distributed with this work for additional information -regarding copyright ownership. The ASF licenses this file -to you under the Apache License, Version 2.0 (the -"License"); you may not use this file except in compliance -with the License. You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. - -<%args> -MetricsRegionServerWrapper mWrap; - -<%import> -java.util.*; -org.apache.hadoop.hbase.regionserver.HRegionServer; -org.apache.hadoop.hbase.regionserver.MetricsRegionServerWrapper; -org.apache.hadoop.hbase.util.Bytes; -org.apache.hadoop.hbase.HRegionInfo; -org.apache.hadoop.hbase.ServerName; -org.apache.hadoop.hbase.HBaseConfiguration; -org.apache.hadoop.hbase.protobuf.ProtobufUtil; -org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ServerInfo; -org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad; -org.apache.hadoop.hbase.util.DirectMemoryUtils; -org.apache.hadoop.util.StringUtils; -com.codahale.metrics.Snapshot; -java.lang.management.ManagementFactory; - -
-<& baseStats; mWrap = mWrap &>
-<& memoryStats; mWrap = mWrap &>
-<& requestStats; mWrap = mWrap &>
-<& walStats; mWrap = mWrap &>
-<& storeStats; mWrap = mWrap &>
-<& queueStats; mWrap = mWrap &>
-
-<%def baseStats>
-<%args>
-    MetricsRegionServerWrapper mWrap;
-</%args>
-    Requests Per Second | Num. Regions | Block locality | Block locality (Secondary replicas) | Slow WAL Append Count
-    <% String.format("%.0f", mWrap.getRequestsPerSecond()) %> | <% mWrap.getNumOnlineRegions() %> | <% mWrap.getPercentFileLocal() %> | <% mWrap.getPercentFileLocalSecondaryRegions() %> | <% mWrap.getNumWALSlowAppend() %>
-</%def>
-
-<%def memoryStats>
-<%args>
-    MetricsRegionServerWrapper mWrap;
-</%args>
-    Used Heap | Max Heap | Direct Memory Used | Direct Memory Configured | Memstore Size
-    <% StringUtils.humanReadableInt(ManagementFactory.getMemoryMXBean().getHeapMemoryUsage().getUsed()) %> | <% StringUtils.humanReadableInt(ManagementFactory.getMemoryMXBean().getHeapMemoryUsage().getMax()) %> | <% StringUtils.humanReadableInt(DirectMemoryUtils.getDirectMemoryUsage()) %> | <% StringUtils.humanReadableInt(DirectMemoryUtils.getDirectMemorySize()) %> | <% StringUtils.humanReadableInt(mWrap.getMemstoreSize()) %>
-</%def>
-
-<%def walStats>
-<%args>
-    MetricsRegionServerWrapper mWrap;
-</%args>
-    Num. WAL Files | Size. WAL Files (bytes)
-    <% mWrap.getNumWALFiles() %> | <% mWrap.getWALFileSize() %>
-</%def>
-
-<%def storeStats>
-<%args>
-    MetricsRegionServerWrapper mWrap;
-</%args>
-    Num. Stores | Num. Storefiles | Root Index Size (bytes) | Index Size (bytes) | Bloom Size (bytes)
-    <% StringUtils.humanReadableInt(mWrap.getNumStores()) %> | <% StringUtils.humanReadableInt(mWrap.getNumStoreFiles()) %> | <% StringUtils.humanReadableInt(mWrap.getStoreFileIndexSize()) %> | <% StringUtils.humanReadableInt(mWrap.getTotalStaticIndexSize()) %> | <% StringUtils.humanReadableInt(mWrap.getTotalStaticBloomSize()) %>
-</%def>
-
-<%def requestStats>
-<%args>
-    MetricsRegionServerWrapper mWrap;
-</%args>
-    Request Per Second | Read Request Count | Write Request Count
-    <% String.format("%.0f", mWrap.getRequestsPerSecond()) %> | <% mWrap.getReadRequestsCount() %> | <% mWrap.getWriteRequestsCount() %>
-</%def>
-
-<%def queueStats>
-<%args>
-    MetricsRegionServerWrapper mWrap;
-</%args>
-    Compaction Queue Size | Flush Queue Size
-    <% mWrap.getCompactionQueueSize() %> | <% mWrap.getFlushQueueSize() %>
-</%def>
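The deleted template renders nothing but getters on the MetricsRegionServerWrapper it is handed. Reconstructed from the calls above, here is an excerpt of that interface (itself deleted elsewhere in this patch); the method names are copied from the template, but the return types are assumptions inferred from how each value is formatted, not from the source:

    // Excerpt reconstructed from the template's calls; the real (deleted)
    // MetricsRegionServerWrapper declares many more methods, and the return
    // types below are assumed from the formatting used above.
    public interface MetricsRegionServerWrapperExcerpt {
      double getRequestsPerSecond();                // baseStats, requestStats
      long getNumOnlineRegions();                   // baseStats
      double getPercentFileLocal();                 // baseStats: block locality
      double getPercentFileLocalSecondaryRegions();
      long getNumWALSlowAppend();
      long getMemstoreSize();                       // memoryStats
      long getNumWALFiles();                        // walStats
      long getWALFileSize();
      long getNumStores();                          // storeStats
      long getNumStoreFiles();
      long getStoreFileIndexSize();
      long getTotalStaticIndexSize();
      long getTotalStaticBloomSize();
      long getReadRequestsCount();                  // requestStats
      long getWriteRequestsCount();
      int getCompactionQueueSize();                 // queueStats
      int getFlushQueueSize();
    }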
- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServer.java deleted file mode 100644 index 05bebb8..0000000 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServer.java +++ /dev/null @@ -1,118 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.ipc; - -import org.apache.hadoop.hbase.MultiActionResultTooLarge; -import org.apache.hadoop.hbase.NotServingRegionException; -import org.apache.hadoop.hbase.RegionTooBusyException; -import org.apache.hadoop.hbase.UnknownScannerException; -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.CompatibilitySingletonFactory; -import org.apache.hadoop.hbase.exceptions.FailedSanityCheckException; -import org.apache.hadoop.hbase.exceptions.OutOfOrderScannerNextException; -import org.apache.hadoop.hbase.exceptions.RegionMovedException; - -@InterfaceAudience.Private -public class MetricsHBaseServer { - private MetricsHBaseServerSource source; - - public MetricsHBaseServer(String serverName, MetricsHBaseServerWrapper wrapper) { - source = CompatibilitySingletonFactory.getInstance(MetricsHBaseServerSourceFactory.class) - .create(serverName, wrapper); - } - - void authorizationSuccess() { - source.authorizationSuccess(); - } - - void authorizationFailure() { - source.authorizationFailure(); - } - - void authenticationFailure() { - source.authenticationFailure(); - } - - void authenticationSuccess() { - source.authenticationSuccess(); - } - - void authenticationFallback() { - source.authenticationFallback(); - } - - void sentBytes(long count) { - source.sentBytes(count); - } - - void receivedBytes(int count) { - source.receivedBytes(count); - } - - void sentResponse(long count) { source.sentResponse(count); } - - void receivedRequest(long count) { source.receivedRequest(count); } - - void dequeuedCall(int qTime) { - source.dequeuedCall(qTime); - } - - void processedCall(int processingTime) { - source.processedCall(processingTime); - } - - void totalCall(int totalTime) { - source.queuedAndProcessedCall(totalTime); - } - - public void exception(Throwable throwable) { - source.exception(); - - /** - * Keep some metrics for commonly seen exceptions - * - * Try and put the most common types first. - * Place child types before the parent type that they extend. 
- * - * If this gets much larger we might have to go to a hashmap - */ - if (throwable != null) { - if (throwable instanceof OutOfOrderScannerNextException) { - source.outOfOrderException(); - } else if (throwable instanceof RegionTooBusyException) { - source.tooBusyException(); - } else if (throwable instanceof UnknownScannerException) { - source.unknownScannerException(); - } else if (throwable instanceof RegionMovedException) { - source.movedRegionException(); - } else if (throwable instanceof NotServingRegionException) { - source.notServingRegionException(); - } else if (throwable instanceof FailedSanityCheckException) { - source.failedSanityException(); - } else if (throwable instanceof MultiActionResultTooLarge) { - source.multiActionTooLargeException(); - } - } - } - - public MetricsHBaseServerSource getMetricsSource() { - return source; - } -} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerWrapperImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerWrapperImpl.java deleted file mode 100644 index 63c4b32..0000000 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerWrapperImpl.java +++ /dev/null @@ -1,81 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
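The deleted exception() classifier spells out its own constraints: most common types first, child types before the parents they extend, and "if this gets much larger we might have to go to a hashmap." A hypothetical sketch of that hashmap variant follows; note that an exact Class-keyed lookup is O(1) but, unlike the instanceof chain, does not match subclasses, which is precisely the ordering problem the removed comment is working around:

    import java.util.HashMap;
    import java.util.Map;
    import java.util.concurrent.atomic.AtomicLong;

    // Hypothetical map-based variant of the deleted instanceof chain.
    class ExceptionMetricsDispatch {
      private final AtomicLong allExceptions = new AtomicLong();
      private final Map<Class<? extends Throwable>, AtomicLong> perType = new HashMap<>();

      // Register (or fetch) a counter for one concrete exception type.
      AtomicLong counterFor(Class<? extends Throwable> type) {
        return perType.computeIfAbsent(type, t -> new AtomicLong());
      }

      void exception(Throwable t) {
        allExceptions.incrementAndGet();          // mirrors the unconditional source.exception()
        if (t == null) {
          return;
        }
        AtomicLong c = perType.get(t.getClass()); // exact-type lookup: subclasses do NOT match
        if (c != null) {
          c.incrementAndGet();
        }
      }
    }

A production version would need to register every concrete subclass or walk getSuperclass() on a miss, which is why a short instanceof chain was a reasonable choice at this size.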
- */ - -package org.apache.hadoop.hbase.ipc; - -public class MetricsHBaseServerWrapperImpl implements MetricsHBaseServerWrapper { - - private RpcServer server; - - MetricsHBaseServerWrapperImpl(RpcServer server) { - this.server = server; - } - - private boolean isServerStarted() { - return this.server != null && this.server.isStarted(); - } - - @Override - public long getTotalQueueSize() { - if (!isServerStarted()) { - return 0; - } - return server.callQueueSize.get(); - } - - @Override - public int getGeneralQueueLength() { - if (!isServerStarted() || this.server.getScheduler() == null) { - return 0; - } - return server.getScheduler().getGeneralQueueLength(); - } - - @Override - public int getReplicationQueueLength() { - if (!isServerStarted() || this.server.getScheduler() == null) { - return 0; - } - return server.getScheduler().getReplicationQueueLength(); - } - - @Override - public int getPriorityQueueLength() { - if (!isServerStarted() || this.server.getScheduler() == null) { - return 0; - } - return server.getScheduler().getPriorityQueueLength(); - } - - @Override - public int getNumOpenConnections() { - if (!isServerStarted() || this.server.connectionList == null) { - return 0; - } - return server.connectionList.size(); - } - - @Override - public int getActiveRpcHandlerCount() { - if (!isServerStarted() || this.server.getScheduler() == null) { - return 0; - } - return server.getScheduler().getActiveRpcHandlerCount(); - } -} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java index ed8d37d..1a45278 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java @@ -225,7 +225,6 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver { // connections to nuke // during a cleanup - protected MetricsHBaseServer metrics; protected final Configuration conf; @@ -1463,7 +1462,6 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver { } doRawSaslReply(SaslStatus.ERROR, null, sendToClient.getClass().getName(), sendToClient.getLocalizedMessage()); - metrics.authenticationFailure(); String clientIP = this.toString(); // attempting user could be null AUDITLOG.warn(AUTH_FAILED_FOR + clientIP + ":" + attemptingUser); @@ -1486,7 +1484,6 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver { + ugi + ". Negotiated QoP is " + saslServer.getNegotiatedProperty(Sasl.QOP)); } - metrics.authenticationSuccess(); AUDITLOG.info(AUTH_SUCCESSFUL_FOR + ugi); saslContextEstablished = true; } @@ -1565,7 +1562,6 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver { } if (isSecurityEnabled && authMethod == AuthMethod.SIMPLE) { if (allowFallbackToSimpleAuth) { - metrics.authenticationFallback(); authenticatedWithFallback = true; } else { AccessDeniedException ae = new AccessDeniedException("Authentication is required"); @@ -1873,7 +1869,6 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver { new Call(id, this.service, null, null, null, null, this, responder, totalRequestSize, null, null); ByteArrayOutputStream responseBuffer = new ByteArrayOutputStream(); - metrics.exception(CALL_QUEUE_TOO_BIG_EXCEPTION); InetSocketAddress address = getListenerAddress(); setupResponse(responseBuffer, callTooBig, CALL_QUEUE_TOO_BIG_EXCEPTION, "Call queue is full on " + (address != null ? 
address : "(channel closed)") + @@ -1909,8 +1904,6 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver { " is unable to read call parameter from client " + getHostAddress(); LOG.warn(msg, t); - metrics.exception(t); - // probably the hbase hadoop version does not match the running hadoop version if (t instanceof LinkageError) { t = new DoNotRetryIOException(t); @@ -1949,12 +1942,10 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver { ProxyUsers.authorize(ugi, this.getHostAddress(), conf); } authorize(ugi, connectionHeader, getHostInetAddress()); - metrics.authorizationSuccess(); } catch (AuthorizationException ae) { if (LOG.isDebugEnabled()) { LOG.debug("Connection authorization failed: " + ae.getMessage(), ae); } - metrics.authorizationFailure(); setupResponse(authFailedResponse, authFailedCall, new AccessDeniedException(ae), ae.getMessage()); responder.doRespond(authFailedCall); @@ -2071,7 +2062,6 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver { listener = new Listener(name); this.port = listener.getAddress().getPort(); - this.metrics = new MetricsHBaseServer(name, new MetricsHBaseServerWrapperImpl(this)); this.tcpNoDelay = conf.getBoolean("hbase.ipc.server.tcpnodelay", true); this.tcpKeepAlive = conf.getBoolean("hbase.ipc.server.tcpkeepalive", true); @@ -2235,11 +2225,6 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver { } long requestSize = param.getSerializedSize(); long responseSize = result.getSerializedSize(); - metrics.dequeuedCall(qTime); - metrics.processedCall(processingTime); - metrics.totalCall(totalTime); - metrics.receivedRequest(requestSize); - metrics.sentResponse(responseSize); // log any RPC responses that are slower than the configured warn // response time or larger than configured warning size boolean tooSlow = (processingTime > warnResponseTime && warnResponseTime > -1); @@ -2260,9 +2245,6 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver { // need to pass it over the wire. if (e instanceof ServiceException) e = e.getCause(); - // increment the number of requests that were exceptions. - metrics.exception(e); - if (e instanceof LinkageError) throw new DoNotRetryIOException(e); if (e instanceof IOException) throw (IOException)e; LOG.error("Unexpected throwable object ", e); @@ -2381,13 +2363,6 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver { return this.errorHandler; } - /** - * Returns the metrics instance for reporting RPC call statistics - */ - @Override - public MetricsHBaseServer getMetrics() { - return metrics; - } @Override public void addCallSize(final long diff) { @@ -2435,7 +2410,6 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver { protected long channelWrite(GatheringByteChannel channel, BufferChain bufferChain) throws IOException { long count = bufferChain.write(channel, NIO_BUFFER_LIMIT); - if (count > 0) this.metrics.sentBytes(count); return count; } @@ -2456,9 +2430,6 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver { int count = (buffer.remaining() <= NIO_BUFFER_LIMIT) ? 
channel.read(buffer) : channelIO(channel, null, buffer); - if (count > 0) { - metrics.receivedBytes(count); - } return count; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServerInterface.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServerInterface.java index ab8b485..b28cb43 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServerInterface.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServerInterface.java @@ -56,11 +56,6 @@ public interface RpcServerInterface { HBaseRPCErrorHandler getErrorHandler(); /** - * Returns the metrics instance for reporting RPC call statistics - */ - MetricsHBaseServer getMetrics(); - - /** * Add/subtract from the current size of all outstanding calls. Called on setup of a call to add * call total size and then again at end of a call to remove the call size. * @param diff Change (plus or minus) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/JobUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/JobUtil.java new file mode 100644 index 0000000..1623c00 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/JobUtil.java @@ -0,0 +1,56 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.mapreduce; + +import java.io.IOException; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.mapreduce.Cluster; +import org.apache.hadoop.mapreduce.JobSubmissionFiles; + +/** + * Utility methods to interact with a job. + */ +@InterfaceAudience.Private +@InterfaceStability.Evolving +public abstract class JobUtil { + private static final Log LOG = LogFactory.getLog(JobUtil.class); + + protected JobUtil() { + super(); + } + + /** + * Initializes the staging directory and returns the path. 
+ * + * @param conf system configuration + * @return staging directory path + * @throws IOException + * @throws InterruptedException + */ + public static Path getStagingDir(Configuration conf) + throws IOException, InterruptedException { + return JobSubmissionFiles.getStagingDir(new Cluster(conf), conf); + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java index cf5f7ac..f1db3d4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java @@ -777,7 +777,6 @@ public class TableMapReduceUtil { org.apache.hadoop.hbase.HConstants.class, // hbase-common org.apache.hadoop.hbase.protobuf.generated.ClientProtos.class, // hbase-protocol org.apache.hadoop.hbase.client.Put.class, // hbase-client - org.apache.hadoop.hbase.CompatibilityFactory.class, // hbase-hadoop-compat org.apache.hadoop.hbase.mapreduce.TableMapper.class, // hbase-server // pull necessary dependencies org.apache.zookeeper.ZooKeeper.class, diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java index f7f98fe..8c54a4c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java @@ -108,8 +108,6 @@ public class AssignmentManager { private LoadBalancer balancer; - private final MetricsAssignmentManager metricsAssignmentManager; - private final TableLockManager tableLockManager; private AtomicInteger numRegionsOpened = new AtomicInteger(0); @@ -207,13 +205,12 @@ public class AssignmentManager { * @param serverManager serverManager for associated HMaster * @param balancer implementation of {@link LoadBalancer} * @param service Executor service - * @param metricsMaster metrics manager * @param tableLockManager TableLock manager * @throws IOException */ public AssignmentManager(MasterServices server, ServerManager serverManager, final LoadBalancer balancer, - final ExecutorService service, MetricsMaster metricsMaster, + final ExecutorService service, final TableLockManager tableLockManager, final TableStateManager tableStateManager) throws IOException { @@ -250,7 +247,6 @@ public class AssignmentManager { this.bulkPerRegionOpenTimeGuesstimate = conf.getInt("hbase.bulk.assignment.perregion.open.time", 10000); - this.metricsAssignmentManager = new MetricsAssignmentManager(); this.tableLockManager = tableLockManager; } @@ -673,187 +669,183 @@ public class AssignmentManager { boolean assign(final ServerName destination, final List regions) throws InterruptedException { long startTime = EnvironmentEdgeManager.currentTime(); + int regionCount = regions.size(); + if (regionCount == 0) { + return true; + } + LOG.info("Assigning " + regionCount + " region(s) to " + destination.toString()); + Set encodedNames = new HashSet(regionCount); + for (HRegionInfo region : regions) { + encodedNames.add(region.getEncodedName()); + } + + List failedToOpenRegions = new ArrayList(); + Map locks = locker.acquireLocks(encodedNames); try { - int regionCount = regions.size(); - if (regionCount == 0) { - return true; - } - LOG.info("Assigning " + regionCount + " region(s) to " + destination.toString()); - Set encodedNames = new HashSet(regionCount); + Map plans = new 
HashMap(regionCount); + List states = new ArrayList(regionCount); for (HRegionInfo region : regions) { - encodedNames.add(region.getEncodedName()); + String encodedName = region.getEncodedName(); + if (!isDisabledorDisablingRegionInRIT(region)) { + RegionState state = forceRegionStateToOffline(region, false); + boolean onDeadServer = false; + if (state != null) { + if (regionStates.wasRegionOnDeadServer(encodedName)) { + LOG.info("Skip assigning " + region.getRegionNameAsString() + + ", it's host " + regionStates.getLastRegionServerOfRegion(encodedName) + + " is dead but not processed yet"); + onDeadServer = true; + } else { + RegionPlan plan = new RegionPlan(region, state.getServerName(), destination); + plans.put(encodedName, plan); + states.add(state); + continue; + } + } + // Reassign if the region wasn't on a dead server + if (!onDeadServer) { + LOG.info("failed to force region state to offline, " + + "will reassign later: " + region); + failedToOpenRegions.add(region); // assign individually later + } + } + // Release the lock, this region is excluded from bulk assign because + // we can't update its state, or set its znode to offline. + Lock lock = locks.remove(encodedName); + lock.unlock(); } - List failedToOpenRegions = new ArrayList(); - Map locks = locker.acquireLocks(encodedNames); + if (server.isStopped()) { + return false; + } + + // Add region plans, so we can updateTimers when one region is opened so + // that unnecessary timeout on RIT is reduced. + this.addPlans(plans); + + List>> regionOpenInfos = + new ArrayList>>(states.size()); + for (RegionState state: states) { + HRegionInfo region = state.getRegion(); + regionStates.updateRegionState( + region, State.PENDING_OPEN, destination); + List favoredNodes = ServerName.EMPTY_SERVER_LIST; + if (this.shouldAssignRegionsWithFavoredNodes) { + favoredNodes = ((FavoredNodeLoadBalancer)this.balancer).getFavoredNodes(region); + } + regionOpenInfos.add(new Pair>( + region, favoredNodes)); + } + + // Move on to open regions. try { - Map plans = new HashMap(regionCount); - List states = new ArrayList(regionCount); - for (HRegionInfo region : regions) { - String encodedName = region.getEncodedName(); - if (!isDisabledorDisablingRegionInRIT(region)) { - RegionState state = forceRegionStateToOffline(region, false); - boolean onDeadServer = false; - if (state != null) { - if (regionStates.wasRegionOnDeadServer(encodedName)) { - LOG.info("Skip assigning " + region.getRegionNameAsString() - + ", it's host " + regionStates.getLastRegionServerOfRegion(encodedName) - + " is dead but not processed yet"); - onDeadServer = true; - } else { - RegionPlan plan = new RegionPlan(region, state.getServerName(), destination); - plans.put(encodedName, plan); - states.add(state); + // Send OPEN RPC. If it fails on a IOE or RemoteException, + // regions will be assigned individually. 
+ Configuration conf = server.getConfiguration(); + long maxWaitTime = System.currentTimeMillis() + + conf.getLong("hbase.regionserver.rpc.startup.waittime", 60000); + for (int i = 1; i <= maximumAttempts && !server.isStopped(); i++) { + try { + List regionOpeningStateList = serverManager + .sendRegionOpen(destination, regionOpenInfos); + for (int k = 0, n = regionOpeningStateList.size(); k < n; k++) { + RegionOpeningState openingState = regionOpeningStateList.get(k); + if (openingState != RegionOpeningState.OPENED) { + HRegionInfo region = regionOpenInfos.get(k).getFirst(); + LOG.info("Got opening state " + openingState + + ", will reassign later: " + region); + // Failed opening this region, reassign it later + forceRegionStateToOffline(region, true); + failedToOpenRegions.add(region); + } + } + break; + } catch (IOException e) { + if (e instanceof RemoteException) { + e = ((RemoteException)e).unwrapRemoteException(); + } + if (e instanceof RegionServerStoppedException) { + LOG.warn("The region server was shut down, ", e); + // No need to retry, the region server is a goner. + return false; + } else if (e instanceof ServerNotRunningYetException) { + long now = System.currentTimeMillis(); + if (now < maxWaitTime) { + if (LOG.isDebugEnabled()) { + LOG.debug("Server is not yet up; waiting up to " + + (maxWaitTime - now) + "ms", e); + } + Thread.sleep(100); + i--; // reset the try count continue; } + } else if (e instanceof java.net.SocketTimeoutException + && this.serverManager.isServerOnline(destination)) { + // In case socket is timed out and the region server is still online, + // the openRegion RPC could have been accepted by the server and + // just the response didn't go through. So we will retry to + // open the region on the same server. + if (LOG.isDebugEnabled()) { + LOG.debug("Bulk assigner openRegion() to " + destination + + " has timed out, but the regions might" + + " already be opened on it.", e); + } + // wait and reset the re-try count, server might be just busy. + Thread.sleep(100); + i--; + continue; + } else if (e instanceof FailedServerException && i < maximumAttempts) { + // In case the server is in the failed server list, no point to + // retry too soon. Retry after the failed_server_expiry time + long sleepTime = 1 + conf.getInt(RpcClient.FAILED_SERVER_EXPIRY_KEY, + RpcClient.FAILED_SERVER_EXPIRY_DEFAULT); + if (LOG.isDebugEnabled()) { + LOG.debug(destination + " is on failed server list; waiting " + + sleepTime + "ms", e); + } + Thread.sleep(sleepTime); + continue; } - // Reassign if the region wasn't on a dead server - if (!onDeadServer) { - LOG.info("failed to force region state to offline, " - + "will reassign later: " + region); - failedToOpenRegions.add(region); // assign individually later - } + throw e; } - // Release the lock, this region is excluded from bulk assign because - // we can't update its state, or set its znode to offline. - Lock lock = locks.remove(encodedName); - lock.unlock(); } - - if (server.isStopped()) { - return false; - } - - // Add region plans, so we can updateTimers when one region is opened so - // that unnecessary timeout on RIT is reduced. 
- this.addPlans(plans); - - List>> regionOpenInfos = - new ArrayList>>(states.size()); + } catch (IOException e) { + // Can be a socket timeout, EOF, NoRouteToHost, etc + LOG.info("Unable to communicate with " + destination + + " in order to assign regions, ", e); for (RegionState state: states) { HRegionInfo region = state.getRegion(); - regionStates.updateRegionState( - region, State.PENDING_OPEN, destination); - List favoredNodes = ServerName.EMPTY_SERVER_LIST; - if (this.shouldAssignRegionsWithFavoredNodes) { - favoredNodes = ((FavoredNodeLoadBalancer)this.balancer).getFavoredNodes(region); - } - regionOpenInfos.add(new Pair>( - region, favoredNodes)); + forceRegionStateToOffline(region, true); } - - // Move on to open regions. - try { - // Send OPEN RPC. If it fails on a IOE or RemoteException, - // regions will be assigned individually. - Configuration conf = server.getConfiguration(); - long maxWaitTime = System.currentTimeMillis() + - conf.getLong("hbase.regionserver.rpc.startup.waittime", 60000); - for (int i = 1; i <= maximumAttempts && !server.isStopped(); i++) { - try { - List regionOpeningStateList = serverManager - .sendRegionOpen(destination, regionOpenInfos); - for (int k = 0, n = regionOpeningStateList.size(); k < n; k++) { - RegionOpeningState openingState = regionOpeningStateList.get(k); - if (openingState != RegionOpeningState.OPENED) { - HRegionInfo region = regionOpenInfos.get(k).getFirst(); - LOG.info("Got opening state " + openingState - + ", will reassign later: " + region); - // Failed opening this region, reassign it later - forceRegionStateToOffline(region, true); - failedToOpenRegions.add(region); - } - } - break; - } catch (IOException e) { - if (e instanceof RemoteException) { - e = ((RemoteException)e).unwrapRemoteException(); - } - if (e instanceof RegionServerStoppedException) { - LOG.warn("The region server was shut down, ", e); - // No need to retry, the region server is a goner. - return false; - } else if (e instanceof ServerNotRunningYetException) { - long now = System.currentTimeMillis(); - if (now < maxWaitTime) { - if (LOG.isDebugEnabled()) { - LOG.debug("Server is not yet up; waiting up to " + - (maxWaitTime - now) + "ms", e); - } - Thread.sleep(100); - i--; // reset the try count - continue; - } - } else if (e instanceof java.net.SocketTimeoutException - && this.serverManager.isServerOnline(destination)) { - // In case socket is timed out and the region server is still online, - // the openRegion RPC could have been accepted by the server and - // just the response didn't go through. So we will retry to - // open the region on the same server. - if (LOG.isDebugEnabled()) { - LOG.debug("Bulk assigner openRegion() to " + destination - + " has timed out, but the regions might" - + " already be opened on it.", e); - } - // wait and reset the re-try count, server might be just busy. - Thread.sleep(100); - i--; - continue; - } else if (e instanceof FailedServerException && i < maximumAttempts) { - // In case the server is in the failed server list, no point to - // retry too soon. 
Retry after the failed_server_expiry time - long sleepTime = 1 + conf.getInt(RpcClient.FAILED_SERVER_EXPIRY_KEY, - RpcClient.FAILED_SERVER_EXPIRY_DEFAULT); - if (LOG.isDebugEnabled()) { - LOG.debug(destination + " is on failed server list; waiting " - + sleepTime + "ms", e); - } - Thread.sleep(sleepTime); - continue; - } - throw e; - } - } - } catch (IOException e) { - // Can be a socket timeout, EOF, NoRouteToHost, etc - LOG.info("Unable to communicate with " + destination - + " in order to assign regions, ", e); - for (RegionState state: states) { - HRegionInfo region = state.getRegion(); - forceRegionStateToOffline(region, true); - } - return false; - } - } finally { - for (Lock lock : locks.values()) { - lock.unlock(); - } - } - - if (!failedToOpenRegions.isEmpty()) { - for (HRegionInfo region : failedToOpenRegions) { - if (!regionStates.isRegionOnline(region)) { - invokeAssign(region); - } - } - } - - // wait for assignment completion - ArrayList userRegionSet = new ArrayList(regions.size()); - for (HRegionInfo region: regions) { - if (!region.getTable().isSystemTable()) { - userRegionSet.add(region); - } - } - if (!waitForAssignment(userRegionSet, true, userRegionSet.size(), - System.currentTimeMillis())) { - LOG.debug("some user regions are still in transition: " + userRegionSet); + return false; } - LOG.debug("Bulk assigning done for " + destination); - return true; } finally { - metricsAssignmentManager.updateBulkAssignTime(EnvironmentEdgeManager.currentTime() - startTime); + for (Lock lock : locks.values()) { + lock.unlock(); + } } + + if (!failedToOpenRegions.isEmpty()) { + for (HRegionInfo region : failedToOpenRegions) { + if (!regionStates.isRegionOnline(region)) { + invokeAssign(region); + } + } + } + + // wait for assignment completion + ArrayList userRegionSet = new ArrayList(regions.size()); + for (HRegionInfo region: regions) { + if (!region.getTable().isSystemTable()) { + userRegionSet.add(region); + } + } + if (!waitForAssignment(userRegionSet, true, userRegionSet.size(), + System.currentTimeMillis())) { + LOG.debug("some user regions are still in transition: " + userRegionSet); + } + LOG.debug("Bulk assigning done for " + destination); + return true; } /** @@ -992,184 +984,180 @@ public class AssignmentManager { */ private void assign(RegionState state, boolean forceNewPlan) { long startTime = EnvironmentEdgeManager.currentTime(); - try { - Configuration conf = server.getConfiguration(); - RegionPlan plan = null; - long maxWaitTime = -1; - HRegionInfo region = state.getRegion(); - Throwable previousException = null; - for (int i = 1; i <= maximumAttempts; i++) { - if (server.isStopped() || server.isAborted()) { - LOG.info("Skip assigning " + region.getRegionNameAsString() - + ", the server is stopped/aborted"); - return; - } - - if (plan == null) { // Get a server for the region at first - try { - plan = getRegionPlan(region, forceNewPlan); - } catch (HBaseIOException e) { - LOG.warn("Failed to get region plan", e); - } - } - - if (plan == null) { - LOG.warn("Unable to determine a plan to assign " + region); - - // For meta region, we have to keep retrying until succeeding - if (region.isMetaRegion()) { - if (i == maximumAttempts) { - i = 0; // re-set attempt count to 0 for at least 1 retry - - LOG.warn("Unable to determine a plan to assign a hbase:meta region " + region + - " after maximumAttempts (" + this.maximumAttempts + - "). 
Reset attempts count and continue retrying."); - } - waitForRetryingMetaAssignment(); - continue; - } - - regionStates.updateRegionState(region, State.FAILED_OPEN); - return; - } - LOG.info("Assigning " + region.getRegionNameAsString() + - " to " + plan.getDestination().toString()); - // Transition RegionState to PENDING_OPEN - regionStates.updateRegionState(region, - State.PENDING_OPEN, plan.getDestination()); - - boolean needNewPlan = false; - final String assignMsg = "Failed assignment of " + region.getRegionNameAsString() + - " to " + plan.getDestination(); + Configuration conf = server.getConfiguration(); + RegionPlan plan = null; + long maxWaitTime = -1; + HRegionInfo region = state.getRegion(); + Throwable previousException = null; + for (int i = 1; i <= maximumAttempts; i++) { + if (server.isStopped() || server.isAborted()) { + LOG.info("Skip assigning " + region.getRegionNameAsString() + + ", the server is stopped/aborted"); + return; + } + + if (plan == null) { // Get a server for the region at first try { - List favoredNodes = ServerName.EMPTY_SERVER_LIST; - if (this.shouldAssignRegionsWithFavoredNodes) { - favoredNodes = ((FavoredNodeLoadBalancer)this.balancer).getFavoredNodes(region); - } - serverManager.sendRegionOpen(plan.getDestination(), region, favoredNodes); - return; // we're done - } catch (Throwable t) { - if (t instanceof RemoteException) { - t = ((RemoteException) t).unwrapRemoteException(); - } - previousException = t; - - // Should we wait a little before retrying? If the server is starting it's yes. - boolean hold = (t instanceof ServerNotRunningYetException); - - // In case socket is timed out and the region server is still online, - // the openRegion RPC could have been accepted by the server and - // just the response didn't go through. So we will retry to - // open the region on the same server. - boolean retry = !hold && (t instanceof java.net.SocketTimeoutException - && this.serverManager.isServerOnline(plan.getDestination())); - - if (hold) { - LOG.warn(assignMsg + ", waiting a little before trying on the same region server " + - "try=" + i + " of " + this.maximumAttempts, t); - - if (maxWaitTime < 0) { - maxWaitTime = EnvironmentEdgeManager.currentTime() - + this.server.getConfiguration().getLong( - "hbase.regionserver.rpc.startup.waittime", 60000); - } - try { - long now = EnvironmentEdgeManager.currentTime(); - if (now < maxWaitTime) { - if (LOG.isDebugEnabled()) { - LOG.debug("Server is not yet up; waiting up to " - + (maxWaitTime - now) + "ms", t); - } - Thread.sleep(100); - i--; // reset the try count - } else { - LOG.debug("Server is not up for a while; try a new one", t); - needNewPlan = true; - } - } catch (InterruptedException ie) { - LOG.warn("Failed to assign " - + region.getRegionNameAsString() + " since interrupted", ie); - regionStates.updateRegionState(region, State.FAILED_OPEN); - Thread.currentThread().interrupt(); - return; - } - } else if (retry) { - i--; // we want to retry as many times as needed as long as the RS is not dead. 
- if (LOG.isDebugEnabled()) { - LOG.debug(assignMsg + ", trying to assign to the same region server due ", t); - } - } else { - needNewPlan = true; - LOG.warn(assignMsg + ", trying to assign elsewhere instead;" + - " try=" + i + " of " + this.maximumAttempts, t); - } + plan = getRegionPlan(region, forceNewPlan); + } catch (HBaseIOException e) { + LOG.warn("Failed to get region plan", e); } + } - if (i == this.maximumAttempts) { - // For meta region, we have to keep retrying until succeeding - if (region.isMetaRegion()) { + if (plan == null) { + LOG.warn("Unable to determine a plan to assign " + region); + + // For meta region, we have to keep retrying until succeeding + if (region.isMetaRegion()) { + if (i == maximumAttempts) { i = 0; // re-set attempt count to 0 for at least 1 retry - LOG.warn(assignMsg + - ", trying to assign a hbase:meta region reached to maximumAttempts (" + - this.maximumAttempts + "). Reset attempt counts and continue retrying."); - waitForRetryingMetaAssignment(); - } - else { - // Don't reset the region state or get a new plan any more. - // This is the last try. - continue; + + LOG.warn("Unable to determine a plan to assign a hbase:meta region " + region + + " after maximumAttempts (" + this.maximumAttempts + + "). Reset attempts count and continue retrying."); } + waitForRetryingMetaAssignment(); + continue; + } + + regionStates.updateRegionState(region, State.FAILED_OPEN); + return; + } + LOG.info("Assigning " + region.getRegionNameAsString() + + " to " + plan.getDestination().toString()); + // Transition RegionState to PENDING_OPEN + regionStates.updateRegionState(region, + State.PENDING_OPEN, plan.getDestination()); + + boolean needNewPlan = false; + final String assignMsg = "Failed assignment of " + region.getRegionNameAsString() + + " to " + plan.getDestination(); + try { + List favoredNodes = ServerName.EMPTY_SERVER_LIST; + if (this.shouldAssignRegionsWithFavoredNodes) { + favoredNodes = ((FavoredNodeLoadBalancer)this.balancer).getFavoredNodes(region); } + serverManager.sendRegionOpen(plan.getDestination(), region, favoredNodes); + return; // we're done + } catch (Throwable t) { + if (t instanceof RemoteException) { + t = ((RemoteException) t).unwrapRemoteException(); + } + previousException = t; + + // Should we wait a little before retrying? If the server is starting it's yes. + boolean hold = (t instanceof ServerNotRunningYetException); + + // In case socket is timed out and the region server is still online, + // the openRegion RPC could have been accepted by the server and + // just the response didn't go through. So we will retry to + // open the region on the same server. + boolean retry = !hold && (t instanceof java.net.SocketTimeoutException + && this.serverManager.isServerOnline(plan.getDestination())); - // If region opened on destination of present plan, reassigning to new - // RS may cause double assignments. In case of RegionAlreadyInTransitionException - // reassigning to same RS. - if (needNewPlan) { - // Force a new plan and reassign. Will return null if no servers. - // The new plan could be the same as the existing plan since we don't - // exclude the server of the original plan, which should not be - // excluded since it could be the only server up now. 
- RegionPlan newPlan = null; + if (hold) { + LOG.warn(assignMsg + ", waiting a little before trying on the same region server " + + "try=" + i + " of " + this.maximumAttempts, t); + + if (maxWaitTime < 0) { + maxWaitTime = EnvironmentEdgeManager.currentTime() + + this.server.getConfiguration().getLong( + "hbase.regionserver.rpc.startup.waittime", 60000); + } try { - newPlan = getRegionPlan(region, true); - } catch (HBaseIOException e) { - LOG.warn("Failed to get region plan", e); - } - if (newPlan == null) { - regionStates.updateRegionState(region, State.FAILED_OPEN); - LOG.warn("Unable to find a viable location to assign region " + - region.getRegionNameAsString()); - return; - } - - if (plan != newPlan && !plan.getDestination().equals(newPlan.getDestination())) { - // Clean out plan we failed execute and one that doesn't look like it'll - // succeed anyways; we need a new plan! - // Transition back to OFFLINE - regionStates.updateRegionState(region, State.OFFLINE); - plan = newPlan; - } else if(plan.getDestination().equals(newPlan.getDestination()) && - previousException instanceof FailedServerException) { - try { - LOG.info("Trying to re-assign " + region.getRegionNameAsString() + - " to the same failed server."); - Thread.sleep(1 + conf.getInt(RpcClient.FAILED_SERVER_EXPIRY_KEY, - RpcClient.FAILED_SERVER_EXPIRY_DEFAULT)); - } catch (InterruptedException ie) { - LOG.warn("Failed to assign " - + region.getRegionNameAsString() + " since interrupted", ie); - regionStates.updateRegionState(region, State.FAILED_OPEN); - Thread.currentThread().interrupt(); - return; + long now = EnvironmentEdgeManager.currentTime(); + if (now < maxWaitTime) { + if (LOG.isDebugEnabled()) { + LOG.debug("Server is not yet up; waiting up to " + + (maxWaitTime - now) + "ms", t); + } + Thread.sleep(100); + i--; // reset the try count + } else { + LOG.debug("Server is not up for a while; try a new one", t); + needNewPlan = true; } + } catch (InterruptedException ie) { + LOG.warn("Failed to assign " + + region.getRegionNameAsString() + " since interrupted", ie); + regionStates.updateRegionState(region, State.FAILED_OPEN); + Thread.currentThread().interrupt(); + return; + } + } else if (retry) { + i--; // we want to retry as many times as needed as long as the RS is not dead. + if (LOG.isDebugEnabled()) { + LOG.debug(assignMsg + ", trying to assign to the same region server due ", t); + } + } else { + needNewPlan = true; + LOG.warn(assignMsg + ", trying to assign elsewhere instead;" + + " try=" + i + " of " + this.maximumAttempts, t); + } + } + + if (i == this.maximumAttempts) { + // For meta region, we have to keep retrying until succeeding + if (region.isMetaRegion()) { + i = 0; // re-set attempt count to 0 for at least 1 retry + LOG.warn(assignMsg + + ", trying to assign a hbase:meta region reached to maximumAttempts (" + + this.maximumAttempts + "). Reset attempt counts and continue retrying."); + waitForRetryingMetaAssignment(); + } + else { + // Don't reset the region state or get a new plan any more. + // This is the last try. + continue; + } + } + + // If region opened on destination of present plan, reassigning to new + // RS may cause double assignments. In case of RegionAlreadyInTransitionException + // reassigning to same RS. + if (needNewPlan) { + // Force a new plan and reassign. Will return null if no servers. + // The new plan could be the same as the existing plan since we don't + // exclude the server of the original plan, which should not be + // excluded since it could be the only server up now. 
+ RegionPlan newPlan = null; + try { + newPlan = getRegionPlan(region, true); + } catch (HBaseIOException e) { + LOG.warn("Failed to get region plan", e); + } + if (newPlan == null) { + regionStates.updateRegionState(region, State.FAILED_OPEN); + LOG.warn("Unable to find a viable location to assign region " + + region.getRegionNameAsString()); + return; + } + + if (plan != newPlan && !plan.getDestination().equals(newPlan.getDestination())) { + // Clean out plan we failed execute and one that doesn't look like it'll + // succeed anyways; we need a new plan! + // Transition back to OFFLINE + regionStates.updateRegionState(region, State.OFFLINE); + plan = newPlan; + } else if(plan.getDestination().equals(newPlan.getDestination()) && + previousException instanceof FailedServerException) { + try { + LOG.info("Trying to re-assign " + region.getRegionNameAsString() + + " to the same failed server."); + Thread.sleep(1 + conf.getInt(RpcClient.FAILED_SERVER_EXPIRY_KEY, + RpcClient.FAILED_SERVER_EXPIRY_DEFAULT)); + } catch (InterruptedException ie) { + LOG.warn("Failed to assign " + + region.getRegionNameAsString() + " since interrupted", ie); + regionStates.updateRegionState(region, State.FAILED_OPEN); + Thread.currentThread().interrupt(); + return; } } } - // Run out of attempts - regionStates.updateRegionState(region, State.FAILED_OPEN); - } finally { - metricsAssignmentManager.updateAssignmentTime(EnvironmentEdgeManager.currentTime() - startTime); } + // Run out of attempts + regionStates.updateRegionState(region, State.FAILED_OPEN); } private boolean isDisabledorDisablingRegionInRIT(final HRegionInfo region) { @@ -1961,11 +1949,6 @@ public class AssignmentManager { oldestRITTime = ritTime; } } - if (this.metricsAssignmentManager != null) { - this.metricsAssignmentManager.updateRITOldestAge(oldestRITTime); - this.metricsAssignmentManager.updateRITCount(totalRITs); - this.metricsAssignmentManager.updateRITCountOverThreshold(totalRITsOverThreshold); - } } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index bdb19f4..811f1d5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -258,8 +258,6 @@ public class HMaster extends HRegionServer implements MasterServices, Server { /** Namespace stuff */ private TableNamespaceManager tableNamespaceManager; - // Metrics for the HMaster - final MetricsMaster metricsMaster; // file system manager for the master FS operations private MasterFileSystem fileSystemManager; @@ -397,8 +395,6 @@ public class HMaster extends HRegionServer implements MasterServices, Server { // should we check encryption settings at master side, default true this.masterCheckEncryption = conf.getBoolean("hbase.master.check.encryption", true); - this.metricsMaster = new MetricsMaster(new MetricsMasterWrapperImpl(this)); - // preload table descriptor at startup this.preLoadTableDescriptors = conf.getBoolean("hbase.master.preload.tabledescriptors", true); @@ -563,10 +559,6 @@ public class HMaster extends HRegionServer implements MasterServices, Server { } } - MetricsMaster getMasterMetrics() { - return metricsMaster; - } - /** * Initialize all ZK based system trackers. 
* @throws IOException @@ -584,7 +576,7 @@ public class HMaster extends HRegionServer implements MasterServices, Server { this.regionNormalizerTracker = new RegionNormalizerTracker(zooKeeper, this); this.regionNormalizerTracker.start(); this.assignmentManager = new AssignmentManager(this, serverManager, - this.balancer, this.service, this.metricsMaster, + this.balancer, this.service, this.tableLockManager, tableStateManager); this.regionServerTracker = new RegionServerTracker(zooKeeper, this, @@ -611,8 +603,6 @@ public class HMaster extends HRegionServer implements MasterServices, Server { this.mpmHost.register(this.snapshotManager); this.mpmHost.register(new MasterFlushTableProcedureManager()); this.mpmHost.loadProcedures(conf); - this.mpmHost.initialize(this, this.metricsMaster); - } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java index 43ae2f8..78b781e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java @@ -74,8 +74,6 @@ public class MasterFileSystem { Configuration conf; // master status Server master; - // metrics for master - private final MetricsMasterFileSystem metricsMasterFilesystem = new MetricsMasterFileSystem(); // Persisted unique cluster ID private ClusterId clusterId; // Keep around for convenience. @@ -399,14 +397,6 @@ public class MasterFileSystem { splitTime = EnvironmentEdgeManager.currentTime(); splitLogSize = splitLogManager.splitLogDistributed(serverNames, logDirs, filter); splitTime = EnvironmentEdgeManager.currentTime() - splitTime; - - if (this.metricsMasterFilesystem != null) { - if (filter == META_FILTER) { - this.metricsMasterFilesystem.addMetaWALSplit(splitTime, splitLogSize); - } else { - this.metricsMasterFilesystem.addSplit(splitTime, splitLogSize); - } - } } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java index b269c3d..b649d67 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java @@ -342,11 +342,6 @@ public class MasterRpcServices extends RSRpcServices ServerName serverName = ProtobufUtil.toServerName(request.getServer()); ServerLoad oldLoad = master.serverManager.getLoad(serverName); master.serverManager.regionServerReport(serverName, new ServerLoad(sl)); - if (sl != null && master.metricsMaster != null) { - // Up our metrics. - master.metricsMaster.incrementRequests(sl.getTotalNumberOfRequests() - - (oldLoad != null ? oldLoad.getTotalNumberOfRequests() : 0)); - } } catch (IOException ioe) { throw new ServiceException(ioe); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsAssignmentManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsAssignmentManager.java deleted file mode 100644 index 7b2423c..0000000 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsAssignmentManager.java +++ /dev/null @@ -1,63 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.master; - -import org.apache.hadoop.hbase.CompatibilitySingletonFactory; - -public class MetricsAssignmentManager { - - private final MetricsAssignmentManagerSource assignmentManagerSource; - - public MetricsAssignmentManager() { - assignmentManagerSource = CompatibilitySingletonFactory.getInstance( - MetricsAssignmentManagerSource.class); - } - - public void updateAssignmentTime(long time) { - assignmentManagerSource.updateAssignmentTime(time); - } - - public void updateBulkAssignTime(long time) { - assignmentManagerSource.updateBulkAssignTime(time); - } - - /** - * set new value for number of regions in transition. - * @param ritCount - */ - public void updateRITCount(int ritCount) { - assignmentManagerSource.setRIT(ritCount); - } - - /** - * update RIT count that are in this state for more than the threshold - * as defined by the property rit.metrics.threshold.time. - * @param ritCountOverThreshold - */ - public void updateRITCountOverThreshold(int ritCountOverThreshold) { - assignmentManagerSource.setRITCountOverThreshold(ritCountOverThreshold); - } - /** - * update the timestamp for oldest region in transition metrics. - * @param timestamp - */ - public void updateRITOldestAge(long timestamp) { - assignmentManagerSource.setRITOldestAge(timestamp); - } -} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsMaster.java deleted file mode 100644 index d055853..0000000 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsMaster.java +++ /dev/null @@ -1,62 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.master; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.classification.InterfaceStability; -import org.apache.hadoop.hbase.CompatibilitySingletonFactory; - -/** - * This class is for maintaining the various master statistics - * and publishing them through the metrics interfaces. - *

- * This class has a number of metrics variables that are publicly accessible; - * these variables (objects) have methods to update their values. - */ -@InterfaceStability.Evolving -@InterfaceAudience.Private -public class MetricsMaster { - private static final Log LOG = LogFactory.getLog(MetricsMaster.class); - private MetricsMasterSource masterSource; - private MetricsMasterProcSource masterProcSource; - - public MetricsMaster(MetricsMasterWrapper masterWrapper) { - masterSource = CompatibilitySingletonFactory.getInstance(MetricsMasterSourceFactory.class).create(masterWrapper); - masterProcSource = - CompatibilitySingletonFactory.getInstance(MetricsMasterProcSourceFactory.class).create(masterWrapper); - } - - // for unit-test usage - public MetricsMasterSource getMetricsSource() { - return masterSource; - } - - public MetricsMasterProcSource getMetricsProcSource() { - return masterProcSource; - } - - /** - * @param inc How much to add to requests. - */ - public void incrementRequests(final long inc) { - masterSource.incRequests(inc); - } -} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterFileSystem.java deleted file mode 100644 index 45dbeb8..0000000 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterFileSystem.java +++ /dev/null @@ -1,50 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.hbase.master; - -import org.apache.hadoop.hbase.CompatibilitySingletonFactory; - -public class MetricsMasterFileSystem { - - private final MetricsMasterFileSystemSource source; - - public MetricsMasterFileSystem() { - source = CompatibilitySingletonFactory.getInstance(MetricsMasterFileSystemSource.class); - } - - /** - * Record a single instance of a split - * @param time time that the split took - * @param size length of original WALs that were split - */ - public synchronized void addSplit(long time, long size) { - source.updateSplitTime(time); - source.updateSplitSize(size); - } - - /** - * Record a single instance of a split - * @param time time that the split took - * @param size length of original WALs that were split - */ - public synchronized void addMetaWALSplit(long time, long size) { - source.updateMetaWALSplitTime(time); - source.updateMetaWALSplitSize(size); - } -} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterWrapperImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterWrapperImpl.java deleted file mode 100644 index a935a37..0000000 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterWrapperImpl.java +++ /dev/null @@ -1,127 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hbase.master; - -import org.apache.commons.lang.StringUtils; -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; - -/** - * Impl for exposing HMaster Information through JMX - */ -@InterfaceAudience.Private -public class MetricsMasterWrapperImpl implements MetricsMasterWrapper { - - private final HMaster master; - - public MetricsMasterWrapperImpl(final HMaster master) { - this.master = master; - } - - @Override - public double getAverageLoad() { - return master.getAverageLoad(); - } - - @Override - public String getClusterId() { - return master.getClusterId(); - } - - @Override - public String getZookeeperQuorum() { - ZooKeeperWatcher zk = master.getZooKeeper(); - if (zk == null) { - return ""; - } - return zk.getQuorum(); - } - - @Override - public String[] getCoprocessors() { - return master.getMasterCoprocessors(); - } - - @Override - public long getStartTime() { - return master.getMasterStartTime(); - } - - @Override - public long getActiveTime() { - return master.getMasterActiveTime(); - } - - @Override - public String getRegionServers() { - ServerManager serverManager = this.master.getServerManager(); - if (serverManager == null) { - return ""; - } - return StringUtils.join(serverManager.getOnlineServers().keySet(), ";"); - } - - @Override - public int getNumRegionServers() { - ServerManager serverManager = this.master.getServerManager(); - if (serverManager == null) { - return 0; - } - return serverManager.getOnlineServers().size(); - } - - @Override - public String getDeadRegionServers() { - ServerManager serverManager = this.master.getServerManager(); - if (serverManager == null) { - return ""; - } - return StringUtils.join(serverManager.getDeadServers().copyServerNames(), ";"); - } - - - @Override - public int getNumDeadRegionServers() { - ServerManager serverManager = this.master.getServerManager(); - if (serverManager == null) { - return 0; - } - return serverManager.getDeadServers().size(); - } - - @Override - public String getServerName() { - ServerName serverName = master.getServerName(); - if (serverName == null) { - return ""; - } - return serverName.getServerName(); - } - - @Override - public boolean getIsActiveMaster() { - return master.isActiveMaster(); - } - - @Override - public long getNumWALFiles() { - return master.getNumWALFiles(); - } - -} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsSnapshot.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsSnapshot.java deleted file mode 100644 index 2d7c797..0000000 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsSnapshot.java +++ /dev/null @@ -1,54 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.master; - -import org.apache.hadoop.hbase.CompatibilitySingletonFactory; - -public class MetricsSnapshot { - - private final MetricsSnapshotSource source; - - public MetricsSnapshot() { - source = CompatibilitySingletonFactory.getInstance(MetricsSnapshotSource.class); - } - - /** - * Record a single instance of a snapshot - * @param time time that the snapshot took - */ - public void addSnapshot(long time) { - source.updateSnapshotTime(time); - } - - /** - * Record a single instance of a snapshot - * @param time time that the snapshot restore took - */ - public void addSnapshotRestore(long time) { - source.updateSnapshotRestoreTime(time); - } - - /** - * Record a single instance of a snapshot cloned table - * @param time time that the snapshot clone took - */ - public void addSnapshotClone(long time) { - source.updateSnapshotCloneTime(time); - } -} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java index 33e8d97..fa6c751 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java @@ -81,21 +81,6 @@ public abstract class BaseLoadBalancer implements LoadBalancer { return UNKNOWN_RACK; } } - - /** - * The constructor that uses the basic MetricsBalancer - */ - protected BaseLoadBalancer() { - metricsBalancer = new MetricsBalancer(); - } - - /** - * This Constructor accepts an instance of MetricsBalancer, - * which will be used instead of creating a new one - */ - protected BaseLoadBalancer(MetricsBalancer metricsBalancer) { - this.metricsBalancer = (metricsBalancer != null) ? 
metricsBalancer : new MetricsBalancer(); - } /** * An efficient array based implementation similar to ClusterState for keeping @@ -959,7 +944,6 @@ public abstract class BaseLoadBalancer implements LoadBalancer { "hbase.balancer.tablesOnMaster"; protected final Set tablesOnMaster = new HashSet(); - protected MetricsBalancer metricsBalancer = null; protected ClusterStatus clusterStatus = null; protected ServerName masterServerName; protected MasterServices services; @@ -1172,7 +1156,6 @@ public abstract class BaseLoadBalancer implements LoadBalancer { @Override public Map> roundRobinAssignment(List regions, List servers) { - metricsBalancer.incrMiscInvocations(); Map> assignments = assignMasterRegions(regions, servers); if (assignments != null && !assignments.isEmpty()) { servers = new ArrayList(servers); @@ -1290,7 +1273,6 @@ public abstract class BaseLoadBalancer implements LoadBalancer { @Override public Map immediateAssignment(List regions, List servers) { - metricsBalancer.incrMiscInvocations(); if (servers == null || servers.isEmpty()) { LOG.warn("Wanted to do random assignment but no servers to assign to"); return null; @@ -1308,7 +1290,6 @@ public abstract class BaseLoadBalancer implements LoadBalancer { */ @Override public ServerName randomAssignment(HRegionInfo regionInfo, List servers) { - metricsBalancer.incrMiscInvocations(); if (servers != null && servers.contains(masterServerName)) { if (shouldBeOnMaster(regionInfo)) { return masterServerName; @@ -1352,8 +1333,7 @@ public abstract class BaseLoadBalancer implements LoadBalancer { @Override public Map> retainAssignment(Map regions, List servers) { - // Update metrics - metricsBalancer.incrMiscInvocations(); + Map> assignments = assignMasterRegions(regions.keySet(), servers); if (assignments != null && !assignments.isEmpty()) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsBalancer.java deleted file mode 100644 index 3707536..0000000 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsBalancer.java +++ /dev/null @@ -1,49 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.master.balancer; - -import org.apache.hadoop.hbase.CompatibilitySingletonFactory; - -/** - * Faced for exposing metrics about the balancer. - */ -public class MetricsBalancer { - - private MetricsBalancerSource source = null; - - public MetricsBalancer() { - initSource(); - } - - /** - * A function to instantiate the metrics source. 
This function can be overridden in its - * subclasses to provide extended sources - */ - protected void initSource() { - source = CompatibilitySingletonFactory.getInstance(MetricsBalancerSource.class); - } - - public void balanceCluster(long time) { - source.updateBalanceCluster(time); - } - - public void incrMiscInvocations() { - source.incrMiscInvocations(); - } -} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsStochasticBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsStochasticBalancer.java deleted file mode 100644 index 850a9f5..0000000 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsStochasticBalancer.java +++ /dev/null @@ -1,71 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.master.balancer; - -import org.apache.hadoop.hbase.CompatibilitySingletonFactory; - -/** - * This metrics balancer uses extended source for stochastic load balancer - * to report its related metrics to JMX. For details, refer to HBASE-13965 - */ -public class MetricsStochasticBalancer extends MetricsBalancer { - /** - * Use the stochastic source instead of the default source. - */ - private MetricsStochasticBalancerSource stochasticSource = null; - - public MetricsStochasticBalancer() { - initSource(); - } - - /** - * This function overrides the initSource in the MetricsBalancer, use - * MetricsStochasticBalancerSource instead of the MetricsBalancerSource. 
- */ - @Override - protected void initSource() { - stochasticSource = - CompatibilitySingletonFactory.getInstance(MetricsStochasticBalancerSource.class); - } - - @Override - public void balanceCluster(long time) { - stochasticSource.updateBalanceCluster(time); - } - - @Override - public void incrMiscInvocations() { - stochasticSource.incrMiscInvocations(); - } - - /** - * Updates the number of metrics reported to JMX - */ - public void updateMetricsSize(int size) { - stochasticSource.updateMetricsSize(size); - } - - /** - * Reports stochastic load balancer costs to JMX - */ - public void updateStochasticCost(String tableName, String costFunctionName, - String costFunctionDesc, Double value) { - stochasticSource.updateStochasticCost(tableName, costFunctionName, costFunctionDesc, value); - } -} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java index f9b3baf..d738bc2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java @@ -134,14 +134,6 @@ public class StochasticLoadBalancer extends BaseLoadBalancer { private RegionReplicaRackCostFunction regionReplicaRackCostFunction; private boolean isByTable = false; private TableName tableName = null; - - /** - * The constructor that pass a MetricsStochasticBalancer to BaseLoadBalancer to replace its - * default MetricsBalancer - */ - public StochasticLoadBalancer() { - super(new MetricsStochasticBalancer()); - } @Override public void onConfigurationChange(Configuration conf) { @@ -216,28 +208,9 @@ public class StochasticLoadBalancer extends BaseLoadBalancer { for(CostFromRegionLoadFunction cost : regionLoadFunctions) { cost.setClusterStatus(st); } - - // update metrics size - try { - // by-table or ensemble mode - int tablesCount = isByTable ? 
services.getTableDescriptors().getAll().size() : 1; - int functionsCount = getCostFunctionNames().length; - - updateMetricsSize(tablesCount * (functionsCount + 1)); // +1 for overall - } catch (Exception e) { - LOG.error("failed to get the size of all tables, exception = " + e.getMessage()); - } - } - - /** - * Update the number of metrics that are reported to JMX - */ - public void updateMetricsSize(int size) { - if (metricsBalancer instanceof MetricsStochasticBalancer) { - ((MetricsStochasticBalancer) metricsBalancer).updateMetricsSize(size); - } } + @Override public synchronized void setMasterServices(MasterServices masterServices) { super.setMasterServices(masterServices); @@ -356,10 +329,7 @@ public class StochasticLoadBalancer extends BaseLoadBalancer { } long endTime = EnvironmentEdgeManager.currentTime(); - metricsBalancer.balanceCluster(endTime - startTime); - // update costs metrics - updateStochasticCosts(tableName, curOverallCost, curFunctionCosts); if (initCost > currentCost) { plans = createRegionPlans(cluster); if (LOG.isDebugEnabled()) { @@ -379,31 +349,6 @@ public class StochasticLoadBalancer extends BaseLoadBalancer { } return null; } - - /** - * update costs to JMX - */ - private void updateStochasticCosts(TableName tableName, Double overall, Double[] subCosts) { - if (tableName == null) return; - - // check if the metricsBalancer is MetricsStochasticBalancer before casting - if (metricsBalancer instanceof MetricsStochasticBalancer) { - MetricsStochasticBalancer balancer = (MetricsStochasticBalancer) metricsBalancer; - // overall cost - balancer.updateStochasticCost(tableName.getNameAsString(), - "Overall", "Overall cost", overall); - - // each cost function - for (int i = 0; i < costFunctions.length; i++) { - CostFunction costFunction = costFunctions[i]; - String costFunctionName = costFunction.getClass().getSimpleName(); - Double costPercent = (overall == 0) ? 
0 : (subCosts[i] / overall); - // TODO: cost function may need a specific description - balancer.updateStochasticCost(tableName.getNameAsString(), costFunctionName, - "The percent of " + costFunctionName, costPercent); - } - } - } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/CloneSnapshotHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/CloneSnapshotHandler.java index 2a6dca8..b5d15db 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/CloneSnapshotHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/CloneSnapshotHandler.java @@ -36,7 +36,6 @@ import org.apache.hadoop.hbase.TableExistsException; import org.apache.hadoop.hbase.errorhandling.ForeignException; import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher; import org.apache.hadoop.hbase.master.MasterServices; -import org.apache.hadoop.hbase.master.MetricsSnapshot; import org.apache.hadoop.hbase.master.SnapshotSentinel; import org.apache.hadoop.hbase.master.handler.CreateTableHandler; import org.apache.hadoop.hbase.monitoring.MonitoredTask; @@ -65,7 +64,6 @@ public class CloneSnapshotHandler extends CreateTableHandler implements Snapshot private final SnapshotDescription snapshot; private final ForeignExceptionDispatcher monitor; - private final MetricsSnapshot metricsSnapshot = new MetricsSnapshot(); private final MonitoredTask status; private RestoreSnapshotHelper.RestoreMetaChanges metaChanges; @@ -154,7 +152,6 @@ public class CloneSnapshotHandler extends CreateTableHandler implements Snapshot } else { status.markComplete("Snapshot '"+ snapshot.getName() +"' clone completed and table enabled!"); } - metricsSnapshot.addSnapshotClone(status.getCompletionTimestamp() - status.getStartTime()); super.completed(exception); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/RestoreSnapshotHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/RestoreSnapshotHandler.java index 56faf76..a4cb88d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/RestoreSnapshotHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/RestoreSnapshotHandler.java @@ -40,7 +40,6 @@ import org.apache.hadoop.hbase.executor.EventType; import org.apache.hadoop.hbase.master.AssignmentManager; import org.apache.hadoop.hbase.master.MasterFileSystem; import org.apache.hadoop.hbase.master.MasterServices; -import org.apache.hadoop.hbase.master.MetricsSnapshot; import org.apache.hadoop.hbase.master.RegionStates; import org.apache.hadoop.hbase.master.SnapshotSentinel; import org.apache.hadoop.hbase.master.handler.TableEventHandler; @@ -67,7 +66,6 @@ public class RestoreSnapshotHandler extends TableEventHandler implements Snapsho private final SnapshotDescription snapshot; private final ForeignExceptionDispatcher monitor; - private final MetricsSnapshot metricsSnapshot = new MetricsSnapshot(); private final MonitoredTask status; private volatile boolean stopped = false; @@ -203,7 +201,6 @@ public class RestoreSnapshotHandler extends TableEventHandler implements Snapsho } else { status.markComplete("Restore snapshot '"+ snapshot.getName() +"'!"); } - metricsSnapshot.addSnapshotRestore(status.getCompletionTimestamp() - status.getStartTime()); super.completed(exception); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java index d430493..ee4dadc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java @@ -52,7 +52,6 @@ import org.apache.hadoop.hbase.master.AssignmentManager; import org.apache.hadoop.hbase.master.MasterCoprocessorHost; import org.apache.hadoop.hbase.master.MasterFileSystem; import org.apache.hadoop.hbase.master.MasterServices; -import org.apache.hadoop.hbase.master.MetricsMaster; import org.apache.hadoop.hbase.master.SnapshotSentinel; import org.apache.hadoop.hbase.master.cleaner.HFileCleaner; import org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner; @@ -162,7 +161,7 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable * @param coordinator procedure coordinator instance. exposed for testing. * @param pool HBase ExecutorServcie instance, exposed for testing. */ - public SnapshotManager(final MasterServices master, final MetricsMaster metricsMaster, + public SnapshotManager(final MasterServices master, ProcedureCoordinator coordinator, ExecutorService pool) throws IOException, UnsupportedOperationException { this.master = master; @@ -1058,7 +1057,7 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable } @Override - public void initialize(MasterServices master, MetricsMaster metricsMaster) throws KeeperException, + public void initialize(MasterServices master) throws KeeperException, IOException, UnsupportedOperationException { this.master = master; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java index 5fd4aaa..6f5a050 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java @@ -41,7 +41,6 @@ import org.apache.hadoop.hbase.errorhandling.ForeignExceptionSnare; import org.apache.hadoop.hbase.executor.EventHandler; import org.apache.hadoop.hbase.executor.EventType; import org.apache.hadoop.hbase.master.MasterServices; -import org.apache.hadoop.hbase.master.MetricsSnapshot; import org.apache.hadoop.hbase.master.SnapshotSentinel; import org.apache.hadoop.hbase.master.TableLockManager; import org.apache.hadoop.hbase.master.TableLockManager.TableLock; @@ -73,7 +72,6 @@ public abstract class TakeSnapshotHandler extends EventHandler implements Snapsh // none of these should ever be null protected final MasterServices master; - protected final MetricsSnapshot metricsSnapshot = new MetricsSnapshot(); protected final SnapshotDescription snapshot; protected final Configuration conf; protected final FileSystem fs; @@ -204,7 +202,6 @@ public abstract class TakeSnapshotHandler extends EventHandler implements Snapsh msg = "Snapshot " + snapshot.getName() + " of table " + snapshotTable + " completed"; status.markComplete(msg); LOG.info(msg); - metricsSnapshot.addSnapshot(status.getCompletionTimestamp() - status.getStartTime()); } catch (Exception e) { status.abort("Failed to complete snapshot " + snapshot.getName() + " on table " + snapshotTable + " because " + e.getMessage()); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/MasterProcedureManager.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/MasterProcedureManager.java index 8f866f6..47f2ab2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/MasterProcedureManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/MasterProcedureManager.java @@ -23,7 +23,6 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.hbase.Stoppable; import org.apache.hadoop.hbase.master.MasterServices; -import org.apache.hadoop.hbase.master.MetricsMaster; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription; import org.apache.zookeeper.KeeperException; @@ -64,7 +63,7 @@ public abstract class MasterProcedureManager extends ProcedureManager implements * @throws IOException * @throws UnsupportedOperationException */ - public abstract void initialize(MasterServices master, MetricsMaster metricsMaster) + public abstract void initialize(MasterServices master) throws KeeperException, IOException, UnsupportedOperationException; /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/MasterProcedureManagerHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/MasterProcedureManagerHost.java index 8161ffe..d75af63 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/MasterProcedureManagerHost.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/MasterProcedureManagerHost.java @@ -22,7 +22,6 @@ import java.util.Hashtable; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.master.MasterServices; -import org.apache.hadoop.hbase.master.MetricsMaster; import org.apache.zookeeper.KeeperException; /** @@ -44,10 +43,10 @@ public class MasterProcedureManagerHost extends } } - public void initialize(MasterServices master, final MetricsMaster metricsMaster) + public void initialize(MasterServices master) throws KeeperException, IOException, UnsupportedOperationException { for (MasterProcedureManager mpm : getProcedureManagers()) { - mpm.initialize(master, metricsMaster); + mpm.initialize(master); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/MasterFlushTableProcedureManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/MasterFlushTableProcedureManager.java index e72da2a..9c70baf 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/MasterFlushTableProcedureManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/MasterFlushTableProcedureManager.java @@ -37,7 +37,6 @@ import org.apache.hadoop.hbase.errorhandling.ForeignException; import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher; import org.apache.hadoop.hbase.master.MasterCoprocessorHost; import org.apache.hadoop.hbase.master.MasterServices; -import org.apache.hadoop.hbase.master.MetricsMaster; import org.apache.hadoop.hbase.procedure.MasterProcedureManager; import org.apache.hadoop.hbase.procedure.Procedure; import org.apache.hadoop.hbase.procedure.ProcedureCoordinator; @@ -85,7 +84,7 @@ public class MasterFlushTableProcedureManager extends MasterProcedureManager { } @Override - public void initialize(MasterServices master, MetricsMaster metricsMaster) + public void initialize(MasterServices master) throws KeeperException, IOException, UnsupportedOperationException { this.master = master; diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java index 9549a13..af625d3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java @@ -593,8 +593,6 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi private RegionSplitPolicy splitPolicy; private FlushPolicy flushPolicy; - private final MetricsRegion metricsRegion; - private final MetricsRegionWrapperImpl metricsRegionWrapper; private final Durability durability; private final boolean regionStatsEnabled; @@ -716,8 +714,6 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi // don't initialize coprocessors if not running within a regionserver // TODO: revisit if coprocessors should load in other cases this.coprocessorHost = new RegionCoprocessorHost(this, rsServices, conf); - this.metricsRegionWrapper = new MetricsRegionWrapperImpl(this); - this.metricsRegion = new MetricsRegion(this.metricsRegionWrapper); Map recoveringRegions = rsServices.getRecoveringRegions(); String encodedName = getRegionInfo().getEncodedName(); @@ -725,9 +721,6 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi this.recovering = true; recoveringRegions.put(encodedName, this); } - } else { - this.metricsRegionWrapper = null; - this.metricsRegion = null; } if (LOG.isDebugEnabled()) { // Write out region name as string and its encoded name. @@ -1167,11 +1160,6 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi } @Override - public MetricsRegion getMetrics() { - return metricsRegion; - } - - @Override public boolean isClosed() { return this.closed.get(); } @@ -1524,12 +1512,6 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi status.setStatus("Running coprocessor post-close hooks"); this.coprocessorHost.postClose(abort); } - if (this.metricsRegion != null) { - this.metricsRegion.close(); - } - if (this.metricsRegionWrapper != null) { - Closeables.closeQuietly(this.metricsRegionWrapper); - } // stop the Compacted hfile discharger if (this.compactedFileDischarger != null) this.compactedFileDischarger.cancel(true); @@ -3326,18 +3308,6 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi // Total time taken might be involving Puts and Deletes. // Split the time for puts and deletes based on the total number of Puts and Deletes. - if (noOfPuts > 0) { - // There were some Puts in the batch. - if (this.metricsRegion != null) { - this.metricsRegion.updatePut(); - } - } - if (noOfDeletes > 0) { - // There were some Deletes in the batch. - if (this.metricsRegion != null) { - this.metricsRegion.updateDelete(); - } - } if (!success) { for (int i = firstIndex; i < lastIndexExclusive; i++) { if (batchOp.retCodeDetails[i].getOperationStatusCode() == OperationStatusCode.NOT_RUN) { @@ -6746,23 +6716,9 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi coprocessorHost.postGet(get, results); } - metricsUpdateForGet(results); - return results; } - void metricsUpdateForGet(List results) { - if (this.metricsRegion != null) { - long totalSize = 0L; - for (Cell cell : results) { - // This should give an estimate of the cell in the result. Why do we need - // to know the serialization of how the codec works with it?? 
- totalSize += CellUtil.estimatedSerializedSizeOf(cell); - } - this.metricsRegion.updateGet(totalSize); - } - } - @Override public void mutateRow(RowMutations rm) throws IOException { // Don't need nonces here - RowMutations only supports puts and deletes @@ -7299,10 +7255,6 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi closeRegionOperation(op); } - if (this.metricsRegion != null) { - this.metricsRegion.updateAppend(); - } - if (flush) { // Request a cache flush. Do it outside update lock. requestFlush(); @@ -7523,9 +7475,6 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi mvcc.completeAndWait(writeEntry); } closeRegionOperation(Operation.INCREMENT); - if (this.metricsRegion != null) { - this.metricsRegion.updateIncrement(); - } } if (flush) { @@ -7551,7 +7500,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi public static final long FIXED_OVERHEAD = ClassSize.align( ClassSize.OBJECT + ClassSize.ARRAY + - 44 * ClassSize.REFERENCE + 3 * Bytes.SIZEOF_INT + + 42 * ClassSize.REFERENCE + 3 * Bytes.SIZEOF_INT + (14 * Bytes.SIZEOF_LONG) + 5 * Bytes.SIZEOF_BOOLEAN); @@ -7563,8 +7512,6 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi // 1 x HRegion$WriteState - writestate // 1 x RegionCoprocessorHost - coprocessorHost // 1 x RegionSplitPolicy - splitPolicy - // 1 x MetricsRegion - metricsRegion - // 1 x MetricsRegionWrapperImpl - metricsRegionWrapper public static final long DEEP_OVERHEAD = FIXED_OVERHEAD + ClassSize.OBJECT + // closeLock (2 * ClassSize.ATOMIC_BOOLEAN) + // closed, closing diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java index 211fed5..621639f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java @@ -137,7 +137,6 @@ import org.apache.hadoop.hbase.regionserver.compactions.CompactionProgress; import org.apache.hadoop.hbase.regionserver.handler.CloseMetaHandler; import org.apache.hadoop.hbase.regionserver.handler.CloseRegionHandler; import org.apache.hadoop.hbase.regionserver.handler.RegionReplicaFlushHandler; -import org.apache.hadoop.hbase.regionserver.wal.MetricsWAL; import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener; import org.apache.hadoop.hbase.replication.regionserver.ReplicationLoad; import org.apache.hadoop.hbase.security.Superusers; @@ -339,7 +338,6 @@ public class HRegionServer extends HasThread implements /** region server process name */ public static final String REGIONSERVER = "regionserver"; - MetricsRegionServer metricsRegionServer; private SpanReceiverHost spanReceiverHost; /** @@ -1175,21 +1173,14 @@ public class HRegionServer extends HasThread implements ClusterStatusProtos.ServerLoad buildServerLoad(long reportStartTime, long reportEndTime) throws IOException { - // We're getting the MetricsRegionServerWrapper here because the wrapper computes requests - // per second, and other metrics As long as metrics are part of ServerLoad it's best to use - // the wrapper to compute those numbers in one place. - // In the long term most of these should be moved off of ServerLoad and the heart beat. 
- // Instead they should be stored in an HBase table so that external visibility into HBase is - // improved; Additionally the load balancer will be able to take advantage of a more complete - // history. - MetricsRegionServerWrapper regionServerWrapper = metricsRegionServer.getRegionServerWrapper(); + Collection regions = getOnlineRegionsLocalContext(); MemoryUsage memory = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage(); ClusterStatusProtos.ServerLoad.Builder serverLoad = ClusterStatusProtos.ServerLoad.newBuilder(); - serverLoad.setNumberOfRequests((int) regionServerWrapper.getRequestsPerSecond()); - serverLoad.setTotalNumberOfRequests((int) regionServerWrapper.getTotalRequestCount()); + serverLoad.setNumberOfRequests((int) 0); + serverLoad.setTotalNumberOfRequests((int) 0); serverLoad.setUsedHeapMB((int)(memory.getUsed() / 1024 / 1024)); serverLoad.setMaxHeapMB((int) (memory.getMax() / 1024 / 1024)); Set coprocessors = getWAL(null).getCoprocessorHost().getCoprocessors(); @@ -1381,9 +1372,6 @@ public class HRegionServer extends HasThread implements this.cacheConfig = new CacheConfig(conf); this.walFactory = setupWALAndReplication(); - // Init in here rather than in constructor after thread name has been set - this.metricsRegionServer = new MetricsRegionServer(new MetricsRegionServerWrapperImpl(this)); - startServiceThreads(); startHeapMemoryManager(); LOG.info("Serving as " + this.serverName + @@ -1639,7 +1627,6 @@ public class HRegionServer extends HasThread implements // listeners the wal factory will add to wals it creates. final List listeners = new ArrayList(); - listeners.add(new MetricsWAL()); if (this.replicationSourceHandler != null && this.replicationSourceHandler.getWALActionsListener() != null) { // Replication handler is an implementation of WALActionsListener. @@ -1677,10 +1664,6 @@ public class HRegionServer extends HasThread implements return roller; } - public MetricsRegionServer getRegionServerMetrics() { - return this.metricsRegionServer; - } - /** * @return Master address tracker instance. */ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java index 40c5046..f6193a8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java @@ -520,7 +520,6 @@ class MemStoreFlusher implements FlushRequester { } if (flushResult.isFlushSucceeded()) { long endTime = EnvironmentEdgeManager.currentTime(); - server.metricsRegionServer.updateFlushTime(endTime - startTime); } } catch (DroppedSnapshotException ex) { // Cache flush can fail in a few places. If it fails in a critical diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegion.java deleted file mode 100644 index 48395a3..0000000 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegion.java +++ /dev/null @@ -1,76 +0,0 @@ -/* - * Copyright The Apache Software Foundation - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. 
The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. - */ -package org.apache.hadoop.hbase.regionserver; - -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.CompatibilitySingletonFactory; - - -/** - * This is the glue between the HRegion and whatever hadoop shim layer - * is loaded (hbase-hadoop1-compat or hbase-hadoop2-compat). - */ -@InterfaceAudience.Private -public class MetricsRegion { - private final MetricsRegionSource source; - private MetricsRegionWrapper regionWrapper; - - public MetricsRegion(final MetricsRegionWrapper wrapper) { - source = CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class) - .createRegion(wrapper); - this.regionWrapper = wrapper; - } - - public void close() { - source.close(); - } - - public void updatePut() { - source.updatePut(); - } - - public void updateDelete() { - source.updateDelete(); - } - - public void updateGet(final long getSize) { - source.updateGet(getSize); - } - - public void updateScanNext(final long scanSize) { - source.updateScan(scanSize); - } - - public void updateAppend() { - source.updateAppend(); - } - - public void updateIncrement() { - source.updateIncrement(); - } - - MetricsRegionSource getSource() { - return source; - } - - public MetricsRegionWrapper getRegionWrapper() { - return regionWrapper; - } - -} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServer.java deleted file mode 100644 index 91f494a..0000000 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServer.java +++ /dev/null @@ -1,119 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.regionserver; - -import com.google.common.annotations.VisibleForTesting; -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.classification.InterfaceStability; -import org.apache.hadoop.hbase.CompatibilitySingletonFactory; - -/** - *

- * This class is for maintaining the various regionserver statistics - * and publishing them through the metrics interfaces. - *

- * This class has a number of metrics variables that are publicly accessible; - * these variables (objects) have methods to update their values. - */ -@InterfaceStability.Evolving -@InterfaceAudience.Private -public class MetricsRegionServer { - private MetricsRegionServerSource serverSource; - private MetricsRegionServerWrapper regionServerWrapper; - - public MetricsRegionServer(MetricsRegionServerWrapper regionServerWrapper) { - this(regionServerWrapper, - CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class) - .createServer(regionServerWrapper)); - - } - - MetricsRegionServer(MetricsRegionServerWrapper regionServerWrapper, - MetricsRegionServerSource serverSource) { - this.regionServerWrapper = regionServerWrapper; - this.serverSource = serverSource; - } - - @VisibleForTesting - public MetricsRegionServerSource getMetricsSource() { - return serverSource; - } - - public MetricsRegionServerWrapper getRegionServerWrapper() { - return regionServerWrapper; - } - - public void updatePut(long t) { - if (t > 1000) { - serverSource.incrSlowPut(); - } - serverSource.updatePut(t); - } - - public void updateDelete(long t) { - if (t > 1000) { - serverSource.incrSlowDelete(); - } - serverSource.updateDelete(t); - } - - public void updateGet(long t) { - if (t > 1000) { - serverSource.incrSlowGet(); - } - serverSource.updateGet(t); - } - - public void updateIncrement(long t) { - if (t > 1000) { - serverSource.incrSlowIncrement(); - } - serverSource.updateIncrement(t); - } - - public void updateAppend(long t) { - if (t > 1000) { - serverSource.incrSlowAppend(); - } - serverSource.updateAppend(t); - } - - public void updateReplay(long t){ - serverSource.updateReplay(t); - } - - public void updateScannerNext(long scanSize){ - serverSource.updateScannerNext(scanSize); - } - - public void updateSplitTime(long t) { - serverSource.updateSplitTime(t); - } - - public void incrSplitRequest() { - serverSource.incrSplitRequest(); - } - - public void incrSplitSuccess() { - serverSource.incrSplitSuccess(); - } - - public void updateFlushTime(long t) { - serverSource.updateFlushTime(t); - } -} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java deleted file mode 100644 index f3e8916..0000000 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java +++ /dev/null @@ -1,751 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hbase.regionserver; - -import java.io.IOException; -import java.util.Collection; -import java.util.List; -import java.util.concurrent.ScheduledExecutorService; -import java.util.concurrent.TimeUnit; - -import org.apache.commons.lang.StringUtils; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.CompatibilitySingletonFactory; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.HDFSBlocksDistribution; -import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.io.hfile.BlockCache; -import org.apache.hadoop.hbase.io.hfile.CacheConfig; -import org.apache.hadoop.hbase.io.hfile.CacheStats; -import org.apache.hadoop.hbase.mob.MobCacheConfig; -import org.apache.hadoop.hbase.mob.MobFileCache; -import org.apache.hadoop.hbase.regionserver.wal.MetricsWALSource; -import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; -import org.apache.hadoop.hbase.util.FSUtils; -import org.apache.hadoop.hbase.wal.WALProvider; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; -import org.apache.hadoop.hdfs.DFSHedgedReadMetrics; -import org.apache.hadoop.metrics2.MetricsExecutor; - -/** - * Impl for exposing HRegionServer Information through Hadoop's metrics 2 system. - */ -@InterfaceAudience.Private -class MetricsRegionServerWrapperImpl - implements MetricsRegionServerWrapper { - - private static final Log LOG = LogFactory.getLog(MetricsRegionServerWrapperImpl.class); - - private final HRegionServer regionServer; - private final MetricsWALSource metricsWALSource; - - private BlockCache blockCache; - private MobFileCache mobFileCache; - - private volatile long numStores = 0; - private volatile long numWALFiles = 0; - private volatile long walFileSize = 0; - private volatile long numStoreFiles = 0; - private volatile long memstoreSize = 0; - private volatile long storeFileSize = 0; - private volatile double requestsPerSecond = 0.0; - private volatile long readRequestsCount = 0; - private volatile long writeRequestsCount = 0; - private volatile long checkAndMutateChecksFailed = 0; - private volatile long checkAndMutateChecksPassed = 0; - private volatile long storefileIndexSize = 0; - private volatile long totalStaticIndexSize = 0; - private volatile long totalStaticBloomSize = 0; - private volatile long numMutationsWithoutWAL = 0; - private volatile long dataInMemoryWithoutWAL = 0; - private volatile double percentFileLocal = 0; - private volatile double percentFileLocalSecondaryRegions = 0; - private volatile long flushedCellsCount = 0; - private volatile long compactedCellsCount = 0; - private volatile long majorCompactedCellsCount = 0; - private volatile long flushedCellsSize = 0; - private volatile long compactedCellsSize = 0; - private volatile long majorCompactedCellsSize = 0; - private volatile long cellsCountCompactedToMob = 0; - private volatile long cellsCountCompactedFromMob = 0; - private volatile long cellsSizeCompactedToMob = 0; - private volatile long cellsSizeCompactedFromMob = 0; - private volatile long mobFlushCount = 0; - private volatile long mobFlushedCellsCount = 0; - private volatile long mobFlushedCellsSize = 0; - private volatile long mobScanCellsCount = 0; - private volatile long mobScanCellsSize = 0; - private volatile long mobFileCacheAccessCount = 0; - private volatile long mobFileCacheMissCount = 0; - private volatile double 
mobFileCacheHitRatio = 0; - private volatile long mobFileCacheEvictedCount = 0; - private volatile long mobFileCacheCount = 0; - private volatile long blockedRequestsCount = 0L; - - private CacheStats cacheStats; - private ScheduledExecutorService executor; - private Runnable runnable; - private long period; - - /** - * Can be null if not on hdfs. - */ - private DFSHedgedReadMetrics dfsHedgedReadMetrics; - - public MetricsRegionServerWrapperImpl(final HRegionServer regionServer) { - this.regionServer = regionServer; - initBlockCache(); - initMobFileCache(); - - this.period = - regionServer.conf.getLong(HConstants.REGIONSERVER_METRICS_PERIOD, - HConstants.DEFAULT_REGIONSERVER_METRICS_PERIOD); - - this.executor = CompatibilitySingletonFactory.getInstance(MetricsExecutor.class).getExecutor(); - this.runnable = new RegionServerMetricsWrapperRunnable(); - this.executor.scheduleWithFixedDelay(this.runnable, this.period, this.period, - TimeUnit.MILLISECONDS); - this.metricsWALSource = CompatibilitySingletonFactory.getInstance(MetricsWALSource.class); - - try { - this.dfsHedgedReadMetrics = FSUtils.getDFSHedgedReadMetrics(regionServer.getConfiguration()); - } catch (IOException e) { - LOG.warn("Failed to get hedged metrics", e); - } - if (LOG.isInfoEnabled()) { - LOG.info("Computing regionserver metrics every " + this.period + " milliseconds"); - } - } - - /** - * It's possible that due to threading the block cache could not be initialized - * yet (testing multiple region servers in one jvm). So we need to try and initialize - * the blockCache and cacheStats reference multiple times until we succeed. - */ - private synchronized void initBlockCache() { - CacheConfig cacheConfig = this.regionServer.cacheConfig; - if (cacheConfig != null && this.blockCache == null) { - this.blockCache = cacheConfig.getBlockCache(); - } - - if (this.blockCache != null && this.cacheStats == null) { - this.cacheStats = blockCache.getStats(); - } - } - - /** - * Initializes the mob file cache. 
- */ - private synchronized void initMobFileCache() { - MobCacheConfig mobCacheConfig = this.regionServer.mobCacheConfig; - if (mobCacheConfig != null && this.mobFileCache == null) { - this.mobFileCache = mobCacheConfig.getMobFileCache(); - } - } - - @Override - public String getClusterId() { - return regionServer.getClusterId(); - } - - @Override - public long getStartCode() { - return regionServer.getStartcode(); - } - - @Override - public String getZookeeperQuorum() { - ZooKeeperWatcher zk = regionServer.getZooKeeper(); - if (zk == null) { - return ""; - } - return zk.getQuorum(); - } - - @Override - public String getCoprocessors() { - String[] coprocessors = regionServer.getRegionServerCoprocessors(); - if (coprocessors == null || coprocessors.length == 0) { - return ""; - } - return StringUtils.join(coprocessors, ", "); - } - - @Override - public String getServerName() { - ServerName serverName = regionServer.getServerName(); - if (serverName == null) { - return ""; - } - return serverName.getServerName(); - } - - @Override - public long getNumOnlineRegions() { - Collection onlineRegionsLocalContext = regionServer.getOnlineRegionsLocalContext(); - if (onlineRegionsLocalContext == null) { - return 0; - } - return onlineRegionsLocalContext.size(); - } - - @Override - public long getTotalRequestCount() { - return regionServer.rpcServices.requestCount.get(); - } - - @Override - public int getSplitQueueSize() { - if (this.regionServer.compactSplitThread == null) { - return 0; - } - return this.regionServer.compactSplitThread.getSplitQueueSize(); - } - - @Override - public int getCompactionQueueSize() { - //The thread could be zero. if so assume there is no queue. - if (this.regionServer.compactSplitThread == null) { - return 0; - } - return this.regionServer.compactSplitThread.getCompactionQueueSize(); - } - - @Override - public int getSmallCompactionQueueSize() { - //The thread could be zero. if so assume there is no queue. - if (this.regionServer.compactSplitThread == null) { - return 0; - } - return this.regionServer.compactSplitThread.getSmallCompactionQueueSize(); - } - - @Override - public int getLargeCompactionQueueSize() { - //The thread could be zero. if so assume there is no queue. - if (this.regionServer.compactSplitThread == null) { - return 0; - } - return this.regionServer.compactSplitThread.getLargeCompactionQueueSize(); - } - - @Override - public int getFlushQueueSize() { - //If there is no flusher there should be no queue. 
- if (this.regionServer.cacheFlusher == null) { - return 0; - } - return this.regionServer.cacheFlusher.getFlushQueueSize(); - } - - @Override - public long getBlockCacheCount() { - if (this.blockCache == null) { - return 0; - } - return this.blockCache.getBlockCount(); - } - - @Override - public long getBlockCacheSize() { - if (this.blockCache == null) { - return 0; - } - return this.blockCache.getCurrentSize(); - } - - @Override - public long getBlockCacheFreeSize() { - if (this.blockCache == null) { - return 0; - } - return this.blockCache.getFreeSize(); - } - - @Override - public long getBlockCacheHitCount() { - if (this.cacheStats == null) { - return 0; - } - return this.cacheStats.getHitCount(); - } - - @Override - public long getBlockCachePrimaryHitCount() { - if (this.cacheStats == null) { - return 0; - } - return this.cacheStats.getPrimaryHitCount(); - } - - @Override - public long getBlockCacheMissCount() { - if (this.cacheStats == null) { - return 0; - } - return this.cacheStats.getMissCount(); - } - - @Override - public long getBlockCachePrimaryMissCount() { - if (this.cacheStats == null) { - return 0; - } - return this.cacheStats.getPrimaryMissCount(); - } - - @Override - public long getBlockCacheEvictedCount() { - if (this.cacheStats == null) { - return 0; - } - return this.cacheStats.getEvictedCount(); - } - - @Override - public long getBlockCachePrimaryEvictedCount() { - if (this.cacheStats == null) { - return 0; - } - return this.cacheStats.getPrimaryEvictedCount(); - } - - @Override - public double getBlockCacheHitPercent() { - if (this.cacheStats == null) { - return 0; - } - double ratio = this.cacheStats.getHitRatio(); - if (Double.isNaN(ratio)) { - ratio = 0; - } - return (ratio * 100); - } - - @Override - public double getBlockCacheHitCachingPercent() { - if (this.cacheStats == null) { - return 0; - } - - double ratio = this.cacheStats.getHitCachingRatio(); - - if (Double.isNaN(ratio)) { - ratio = 0; - } - return (ratio * 100); - } - - @Override - public long getBlockCacheFailedInsertions() { - return this.cacheStats.getFailedInserts(); - } - - @Override public void forceRecompute() { - this.runnable.run(); - } - - @Override - public long getNumStores() { - return numStores; - } - - @Override - public long getNumWALFiles() { - return numWALFiles; - } - - @Override - public long getWALFileSize() { - return walFileSize; - } - - @Override - public long getNumWALSlowAppend() { - return metricsWALSource.getSlowAppendCount(); - } - - @Override - public long getNumStoreFiles() { - return numStoreFiles; - } - - @Override - public long getMemstoreSize() { - return memstoreSize; - } - - @Override - public long getStoreFileSize() { - return storeFileSize; - } - - @Override public double getRequestsPerSecond() { - return requestsPerSecond; - } - - @Override - public long getReadRequestsCount() { - return readRequestsCount; - } - - @Override - public long getWriteRequestsCount() { - return writeRequestsCount; - } - - @Override - public long getCheckAndMutateChecksFailed() { - return checkAndMutateChecksFailed; - } - - @Override - public long getCheckAndMutateChecksPassed() { - return checkAndMutateChecksPassed; - } - - @Override - public long getStoreFileIndexSize() { - return storefileIndexSize; - } - - @Override - public long getTotalStaticIndexSize() { - return totalStaticIndexSize; - } - - @Override - public long getTotalStaticBloomSize() { - return totalStaticBloomSize; - } - - @Override - public long getNumMutationsWithoutWAL() { - return numMutationsWithoutWAL; - } - - 
@Override - public long getDataInMemoryWithoutWAL() { - return dataInMemoryWithoutWAL; - } - - @Override - public double getPercentFileLocal() { - return percentFileLocal; - } - - @Override - public double getPercentFileLocalSecondaryRegions() { - return percentFileLocalSecondaryRegions; - } - - @Override - public long getUpdatesBlockedTime() { - if (this.regionServer.cacheFlusher == null) { - return 0; - } - return this.regionServer.cacheFlusher.getUpdatesBlockedMsHighWater().get(); - } - - @Override - public long getFlushedCellsCount() { - return flushedCellsCount; - } - - @Override - public long getCompactedCellsCount() { - return compactedCellsCount; - } - - @Override - public long getMajorCompactedCellsCount() { - return majorCompactedCellsCount; - } - - @Override - public long getFlushedCellsSize() { - return flushedCellsSize; - } - - @Override - public long getCompactedCellsSize() { - return compactedCellsSize; - } - - @Override - public long getMajorCompactedCellsSize() { - return majorCompactedCellsSize; - } - - @Override - public long getCellsCountCompactedFromMob() { - return cellsCountCompactedFromMob; - } - - @Override - public long getCellsCountCompactedToMob() { - return cellsCountCompactedToMob; - } - - @Override - public long getCellsSizeCompactedFromMob() { - return cellsSizeCompactedFromMob; - } - - @Override - public long getCellsSizeCompactedToMob() { - return cellsSizeCompactedToMob; - } - - @Override - public long getMobFlushCount() { - return mobFlushCount; - } - - @Override - public long getMobFlushedCellsCount() { - return mobFlushedCellsCount; - } - - @Override - public long getMobFlushedCellsSize() { - return mobFlushedCellsSize; - } - - @Override - public long getMobScanCellsCount() { - return mobScanCellsCount; - } - - @Override - public long getMobScanCellsSize() { - return mobScanCellsSize; - } - - @Override - public long getMobFileCacheAccessCount() { - return mobFileCacheAccessCount; - } - - @Override - public long getMobFileCacheMissCount() { - return mobFileCacheMissCount; - } - - @Override - public long getMobFileCacheCount() { - return mobFileCacheCount; - } - - @Override - public long getMobFileCacheEvictedCount() { - return mobFileCacheEvictedCount; - } - - @Override - public double getMobFileCacheHitPercent() { - return mobFileCacheHitRatio * 100; - } - - /** - * This is the runnable that will be executed on the executor every PERIOD number of seconds - * It will take metrics/numbers from all of the regions and use them to compute point in - * time metrics. 
- */ - public class RegionServerMetricsWrapperRunnable implements Runnable { - - private long lastRan = 0; - private long lastRequestCount = 0; - - @Override - synchronized public void run() { - try { - initBlockCache(); - initMobFileCache(); - cacheStats = blockCache.getStats(); - - HDFSBlocksDistribution hdfsBlocksDistribution = - new HDFSBlocksDistribution(); - HDFSBlocksDistribution hdfsBlocksDistributionSecondaryRegions = - new HDFSBlocksDistribution(); - - long tempNumStores = 0, tempNumStoreFiles = 0, tempMemstoreSize = 0, tempStoreFileSize = 0; - long tempReadRequestsCount = 0, tempWriteRequestsCount = 0; - long tempCheckAndMutateChecksFailed = 0; - long tempCheckAndMutateChecksPassed = 0; - long tempStorefileIndexSize = 0; - long tempTotalStaticIndexSize = 0; - long tempTotalStaticBloomSize = 0; - long tempNumMutationsWithoutWAL = 0; - long tempDataInMemoryWithoutWAL = 0; - double tempPercentFileLocal = 0; - double tempPercentFileLocalSecondaryRegions = 0; - long tempFlushedCellsCount = 0; - long tempCompactedCellsCount = 0; - long tempMajorCompactedCellsCount = 0; - long tempFlushedCellsSize = 0; - long tempCompactedCellsSize = 0; - long tempMajorCompactedCellsSize = 0; - long tempCellsCountCompactedToMob = 0; - long tempCellsCountCompactedFromMob = 0; - long tempCellsSizeCompactedToMob = 0; - long tempCellsSizeCompactedFromMob = 0; - long tempMobFlushCount = 0; - long tempMobFlushedCellsCount = 0; - long tempMobFlushedCellsSize = 0; - long tempMobScanCellsCount = 0; - long tempMobScanCellsSize = 0; - long tempBlockedRequestsCount = 0; - - for (Region r : regionServer.getOnlineRegionsLocalContext()) { - tempNumMutationsWithoutWAL += r.getNumMutationsWithoutWAL(); - tempDataInMemoryWithoutWAL += r.getDataInMemoryWithoutWAL(); - tempReadRequestsCount += r.getReadRequestsCount(); - tempWriteRequestsCount += r.getWriteRequestsCount(); - tempCheckAndMutateChecksFailed += r.getCheckAndMutateChecksFailed(); - tempCheckAndMutateChecksPassed += r.getCheckAndMutateChecksPassed(); - tempBlockedRequestsCount += r.getBlockedRequestsCount(); - List storeList = r.getStores(); - tempNumStores += storeList.size(); - for (Store store : storeList) { - tempNumStoreFiles += store.getStorefilesCount(); - tempMemstoreSize += store.getMemStoreSize(); - tempStoreFileSize += store.getStorefilesSize(); - tempStorefileIndexSize += store.getStorefilesIndexSize(); - tempTotalStaticBloomSize += store.getTotalStaticBloomSize(); - tempTotalStaticIndexSize += store.getTotalStaticIndexSize(); - tempFlushedCellsCount += store.getFlushedCellsCount(); - tempCompactedCellsCount += store.getCompactedCellsCount(); - tempMajorCompactedCellsCount += store.getMajorCompactedCellsCount(); - tempFlushedCellsSize += store.getFlushedCellsSize(); - tempCompactedCellsSize += store.getCompactedCellsSize(); - tempMajorCompactedCellsSize += store.getMajorCompactedCellsSize(); - if (store instanceof HMobStore) { - HMobStore mobStore = (HMobStore) store; - tempCellsCountCompactedToMob += mobStore.getCellsCountCompactedToMob(); - tempCellsCountCompactedFromMob += mobStore.getCellsCountCompactedFromMob(); - tempCellsSizeCompactedToMob += mobStore.getCellsSizeCompactedToMob(); - tempCellsSizeCompactedFromMob += mobStore.getCellsSizeCompactedFromMob(); - tempMobFlushCount += mobStore.getMobFlushCount(); - tempMobFlushedCellsCount += mobStore.getMobFlushedCellsCount(); - tempMobFlushedCellsSize += mobStore.getMobFlushedCellsSize(); - tempMobScanCellsCount += mobStore.getMobScanCellsCount(); - tempMobScanCellsSize += 
mobStore.getMobScanCellsSize(); - } - } - - HDFSBlocksDistribution distro = r.getHDFSBlocksDistribution(); - hdfsBlocksDistribution.add(distro); - if (r.getRegionInfo().getReplicaId() != HRegionInfo.DEFAULT_REPLICA_ID) { - hdfsBlocksDistributionSecondaryRegions.add(distro); - } - } - float localityIndex = hdfsBlocksDistribution.getBlockLocalityIndex( - regionServer.getServerName().getHostname()); - tempPercentFileLocal = Double.isNaN(tempBlockedRequestsCount) ? 0 : (localityIndex * 100); - - float localityIndexSecondaryRegions = hdfsBlocksDistributionSecondaryRegions - .getBlockLocalityIndex(regionServer.getServerName().getHostname()); - tempPercentFileLocalSecondaryRegions = Double. - isNaN(localityIndexSecondaryRegions) ? 0 : (localityIndexSecondaryRegions * 100); - - // Compute the number of requests per second - long currentTime = EnvironmentEdgeManager.currentTime(); - - // assume that it took PERIOD seconds to start the executor. - // this is a guess but it's a pretty good one. - if (lastRan == 0) { - lastRan = currentTime - period; - } - // If we've time traveled keep the last requests per second. - if ((currentTime - lastRan) > 0) { - long currentRequestCount = getTotalRequestCount(); - requestsPerSecond = (currentRequestCount - lastRequestCount) / - ((currentTime - lastRan) / 1000.0); - lastRequestCount = currentRequestCount; - } - lastRan = currentTime; - - WALProvider provider = regionServer.walFactory.getWALProvider(); - WALProvider metaProvider = regionServer.walFactory.getMetaWALProvider(); - numWALFiles = (provider == null ? 0 : provider.getNumLogFiles()) + - (metaProvider == null ? 0 : metaProvider.getNumLogFiles()); - walFileSize = (provider == null ? 0 : provider.getLogFileSize()) + - (provider == null ? 0 : provider.getLogFileSize()); - // Copy over computed values so that no thread sees half computed values. - numStores = tempNumStores; - numStoreFiles = tempNumStoreFiles; - memstoreSize = tempMemstoreSize; - storeFileSize = tempStoreFileSize; - readRequestsCount = tempReadRequestsCount; - writeRequestsCount = tempWriteRequestsCount; - checkAndMutateChecksFailed = tempCheckAndMutateChecksFailed; - checkAndMutateChecksPassed = tempCheckAndMutateChecksPassed; - storefileIndexSize = tempStorefileIndexSize; - totalStaticIndexSize = tempTotalStaticIndexSize; - totalStaticBloomSize = tempTotalStaticBloomSize; - numMutationsWithoutWAL = tempNumMutationsWithoutWAL; - dataInMemoryWithoutWAL = tempDataInMemoryWithoutWAL; - percentFileLocal = tempPercentFileLocal; - percentFileLocalSecondaryRegions = tempPercentFileLocalSecondaryRegions; - flushedCellsCount = tempFlushedCellsCount; - compactedCellsCount = tempCompactedCellsCount; - majorCompactedCellsCount = tempMajorCompactedCellsCount; - flushedCellsSize = tempFlushedCellsSize; - compactedCellsSize = tempCompactedCellsSize; - majorCompactedCellsSize = tempMajorCompactedCellsSize; - cellsCountCompactedToMob = tempCellsCountCompactedToMob; - cellsCountCompactedFromMob = tempCellsCountCompactedFromMob; - cellsSizeCompactedToMob = tempCellsSizeCompactedToMob; - cellsSizeCompactedFromMob = tempCellsSizeCompactedFromMob; - mobFlushCount = tempMobFlushCount; - mobFlushedCellsCount = tempMobFlushedCellsCount; - mobFlushedCellsSize = tempMobFlushedCellsSize; - mobScanCellsCount = tempMobScanCellsCount; - mobScanCellsSize = tempMobScanCellsSize; - mobFileCacheAccessCount = mobFileCache.getAccessCount(); - mobFileCacheMissCount = mobFileCache.getMissCount(); - mobFileCacheHitRatio = Double. 
- isNaN(mobFileCache.getHitRatio())?0:mobFileCache.getHitRatio(); - mobFileCacheEvictedCount = mobFileCache.getEvictedFileCount(); - mobFileCacheCount = mobFileCache.getCacheSize(); - blockedRequestsCount = tempBlockedRequestsCount; - } catch (Throwable e) { - LOG.warn("Caught exception! Will suppress and retry.", e); - } - } - } - - @Override - public long getHedgedReadOps() { - return this.dfsHedgedReadMetrics == null? 0: this.dfsHedgedReadMetrics.getHedgedReadOps(); - } - - @Override - public long getHedgedReadWins() { - return this.dfsHedgedReadMetrics == null? 0: this.dfsHedgedReadMetrics.getHedgedReadWins(); - } - - @Override - public long getBlockedRequestsCount() { - return blockedRequestsCount; - } -} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapperImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapperImpl.java deleted file mode 100644 index 08865e6..0000000 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapperImpl.java +++ /dev/null @@ -1,174 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.hbase.regionserver; - -import java.io.Closeable; -import java.io.IOException; -import java.util.Map; -import java.util.concurrent.ScheduledExecutorService; -import java.util.concurrent.ScheduledFuture; -import java.util.concurrent.TimeUnit; - -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.CompatibilitySingletonFactory; -import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.HTableDescriptor; -import org.apache.hadoop.metrics2.MetricsExecutor; - -@InterfaceAudience.Private -public class MetricsRegionWrapperImpl implements MetricsRegionWrapper, Closeable { - - public static final int PERIOD = 45; - public static final String UNKNOWN = "unknown"; - - private final HRegion region; - private ScheduledExecutorService executor; - private Runnable runnable; - private long numStoreFiles; - private long memstoreSize; - private long storeFileSize; - - private ScheduledFuture regionMetricsUpdateTask; - - public MetricsRegionWrapperImpl(HRegion region) { - this.region = region; - this.executor = CompatibilitySingletonFactory.getInstance(MetricsExecutor.class).getExecutor(); - this.runnable = new HRegionMetricsWrapperRunnable(); - this.regionMetricsUpdateTask = this.executor.scheduleWithFixedDelay(this.runnable, PERIOD, - PERIOD, TimeUnit.SECONDS); - } - - @Override - public String getTableName() { - HTableDescriptor tableDesc = this.region.getTableDesc(); - if (tableDesc == null) { - return UNKNOWN; - } - return tableDesc.getTableName().getQualifierAsString(); - } - - @Override - public String getNamespace() { - HTableDescriptor tableDesc = this.region.getTableDesc(); - if (tableDesc == null) { - return UNKNOWN; - } - return tableDesc.getTableName().getNamespaceAsString(); - } - - - @Override - public String getRegionName() { - HRegionInfo regionInfo = this.region.getRegionInfo(); - if (regionInfo == null) { - return UNKNOWN; - } - return regionInfo.getEncodedName(); - } - - @Override - public long getNumStores() { - Map stores = this.region.stores; - if (stores == null) { - return 0; - } - return stores.size(); - } - - @Override - public long getNumStoreFiles() { - return numStoreFiles; - } - - @Override - public long getMemstoreSize() { - return memstoreSize; - } - - @Override - public long getStoreFileSize() { - return storeFileSize; - } - - @Override - public long getReadRequestCount() { - return this.region.getReadRequestsCount(); - } - - @Override - public long getWriteRequestCount() { - return this.region.getWriteRequestsCount(); - } - - @Override - public long getNumFilesCompacted() { - return this.region.compactionNumFilesCompacted.get(); - } - - @Override - public long getNumBytesCompacted() { - return this.region.compactionNumBytesCompacted.get(); - } - - @Override - public long getNumCompactionsCompleted() { - return this.region.compactionsFinished.get(); - } - - @Override - public int getRegionHashCode() { - return this.region.hashCode(); - } - - public class HRegionMetricsWrapperRunnable implements Runnable { - - @Override - public void run() { - long tempNumStoreFiles = 0; - long tempMemstoreSize = 0; - long tempStoreFileSize = 0; - - if (region.stores != null) { - for (Store store : region.stores.values()) { - tempNumStoreFiles += store.getStorefilesCount(); - tempMemstoreSize += store.getMemStoreSize(); - tempStoreFileSize += store.getStorefilesSize(); - } - } - - numStoreFiles = tempNumStoreFiles; - memstoreSize = tempMemstoreSize; - storeFileSize = tempStoreFileSize; - } - } - - 
@Override - public void close() throws IOException { - regionMetricsUpdateTask.cancel(true); - } - - /** - * Get the replica id of this region. - */ - @Override - public int getReplicaId() { - return region.getRegionInfo().getReplicaId(); - } - -} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java index 3b254c0..3aeac67 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java @@ -592,10 +592,6 @@ public class RSRpcServices implements HBaseRPCErrorHandler, region.getCoprocessorHost().postAppend(append, r); } } - if (regionServer.metricsRegionServer != null) { - regionServer.metricsRegionServer.updateAppend( - EnvironmentEdgeManager.currentTime() - before); - } return r; } @@ -630,10 +626,6 @@ public class RSRpcServices implements HBaseRPCErrorHandler, r = region.getCoprocessorHost().postIncrement(increment, r); } } - if (regionServer.metricsRegionServer != null) { - regionServer.metricsRegionServer.updateIncrement( - EnvironmentEdgeManager.currentTime() - before); - } return r; } @@ -685,7 +677,6 @@ public class RSRpcServices implements HBaseRPCErrorHandler, // Only report the exception once since there's only one request that // caused the exception. Otherwise this number will dominate the exceptions count. - rpcServer.getMetrics().exception(sizeIOE); } // Now that there's an exception is known to be created @@ -717,7 +708,6 @@ public class RSRpcServices implements HBaseRPCErrorHandler, .setName(result.getClass().getName()) .setValue(result.toByteString()))); } catch (IOException ioe) { - rpcServer.getMetrics().exception(ioe); resultOrExceptionBuilder.setException(ResponseConverter.buildException(ioe)); } } else if (action.hasMutation()) { @@ -770,7 +760,6 @@ public class RSRpcServices implements HBaseRPCErrorHandler, // case the corresponding ResultOrException instance for the Put or Delete will be added // down in the doBatchOp method call rather than up here. } catch (IOException ie) { - rpcServer.getMetrics().exception(ie); resultOrExceptionBuilder = ResultOrException.newBuilder(). 
setException(ResponseConverter.buildException(ie)); } @@ -853,15 +842,6 @@ public class RSRpcServices implements HBaseRPCErrorHandler, builder.addResultOrException(getResultOrException(ie, mutations.get(i).getIndex())); } } - if (regionServer.metricsRegionServer != null) { - long after = EnvironmentEdgeManager.currentTime(); - if (batchContainsPuts) { - regionServer.metricsRegionServer.updatePut(after - before); - } - if (batchContainsDelete) { - regionServer.metricsRegionServer.updateDelete(after - before); - } - } } /** @@ -878,66 +858,54 @@ public class RSRpcServices implements HBaseRPCErrorHandler, final List mutations, long replaySeqId) throws IOException { long before = EnvironmentEdgeManager.currentTime(); boolean batchContainsPuts = false, batchContainsDelete = false; - try { - for (Iterator it = mutations.iterator(); it.hasNext();) { - WALSplitter.MutationReplay m = it.next(); + for (Iterator it = mutations.iterator(); it.hasNext();) { + WALSplitter.MutationReplay m = it.next(); - if (m.type == MutationType.PUT) { - batchContainsPuts = true; - } else { - batchContainsDelete = true; - } + if (m.type == MutationType.PUT) { + batchContainsPuts = true; + } else { + batchContainsDelete = true; + } - NavigableMap> map = m.mutation.getFamilyCellMap(); - List metaCells = map.get(WALEdit.METAFAMILY); - if (metaCells != null && !metaCells.isEmpty()) { - for (Cell metaCell : metaCells) { - CompactionDescriptor compactionDesc = WALEdit.getCompaction(metaCell); - boolean isDefaultReplica = RegionReplicaUtil.isDefaultReplica(region.getRegionInfo()); - HRegion hRegion = (HRegion)region; - if (compactionDesc != null) { - // replay the compaction. Remove the files from stores only if we are the primary - // region replica (thus own the files) - hRegion.replayWALCompactionMarker(compactionDesc, !isDefaultReplica, isDefaultReplica, - replaySeqId); - continue; - } - FlushDescriptor flushDesc = WALEdit.getFlushDescriptor(metaCell); - if (flushDesc != null && !isDefaultReplica) { - hRegion.replayWALFlushMarker(flushDesc, replaySeqId); - continue; - } - RegionEventDescriptor regionEvent = WALEdit.getRegionEventDescriptor(metaCell); - if (regionEvent != null && !isDefaultReplica) { - hRegion.replayWALRegionEventMarker(regionEvent); - continue; - } - BulkLoadDescriptor bulkLoadEvent = WALEdit.getBulkLoadDescriptor(metaCell); - if (bulkLoadEvent != null) { - hRegion.replayWALBulkLoadEventMarker(bulkLoadEvent); - continue; - } + NavigableMap> map = m.mutation.getFamilyCellMap(); + List metaCells = map.get(WALEdit.METAFAMILY); + if (metaCells != null && !metaCells.isEmpty()) { + for (Cell metaCell : metaCells) { + CompactionDescriptor compactionDesc = WALEdit.getCompaction(metaCell); + boolean isDefaultReplica = RegionReplicaUtil.isDefaultReplica(region.getRegionInfo()); + HRegion hRegion = (HRegion)region; + if (compactionDesc != null) { + // replay the compaction. 
Remove the files from stores only if we are the primary + // region replica (thus own the files) + hRegion.replayWALCompactionMarker(compactionDesc, !isDefaultReplica, isDefaultReplica, + replaySeqId); + continue; + } + FlushDescriptor flushDesc = WALEdit.getFlushDescriptor(metaCell); + if (flushDesc != null && !isDefaultReplica) { + hRegion.replayWALFlushMarker(flushDesc, replaySeqId); + continue; + } + RegionEventDescriptor regionEvent = WALEdit.getRegionEventDescriptor(metaCell); + if (regionEvent != null && !isDefaultReplica) { + hRegion.replayWALRegionEventMarker(regionEvent); + continue; + } + BulkLoadDescriptor bulkLoadEvent = WALEdit.getBulkLoadDescriptor(metaCell); + if (bulkLoadEvent != null) { + hRegion.replayWALBulkLoadEventMarker(bulkLoadEvent); + continue; } - it.remove(); - } - } - requestCount.add(mutations.size()); - if (!region.getRegionInfo().isMetaTable()) { - regionServer.cacheFlusher.reclaimMemStoreMemory(); - } - return region.batchReplay(mutations.toArray( - new WALSplitter.MutationReplay[mutations.size()]), replaySeqId); - } finally { - if (regionServer.metricsRegionServer != null) { - long after = EnvironmentEdgeManager.currentTime(); - if (batchContainsPuts) { - regionServer.metricsRegionServer.updatePut(after - before); - } - if (batchContainsDelete) { - regionServer.metricsRegionServer.updateDelete(after - before); } + it.remove(); } } + requestCount.add(mutations.size()); + if (!region.getRegionInfo().isMetaTable()) { + regionServer.cacheFlusher.reclaimMemStoreMemory(); + } + return region.batchReplay(mutations.toArray( + new WALSplitter.MutationReplay[mutations.size()]), replaySeqId); } private void closeAllScanners() { @@ -1372,7 +1340,6 @@ public class RSRpcServices implements HBaseRPCErrorHandler, ((HRegion)region).flushcache(true, writeFlushWalMarker); if (flushResult.isFlushSucceeded()) { long endTime = EnvironmentEdgeManager.currentTime(); - regionServer.metricsRegionServer.updateFlushTime(endTime - startTime); } boolean compactionNeeded = flushResult.isCompactionNeeded(); if (compactionNeeded) { @@ -1516,14 +1483,9 @@ public class RSRpcServices implements HBaseRPCErrorHandler, FlushResult flushResult = regionA.flush(true); if (flushResult.isFlushSucceeded()) { long endTime = EnvironmentEdgeManager.currentTime(); - regionServer.metricsRegionServer.updateFlushTime(endTime - startTime); } startTime = EnvironmentEdgeManager.currentTime(); flushResult = regionB.flush(true); - if (flushResult.isFlushSucceeded()) { - long endTime = EnvironmentEdgeManager.currentTime(); - regionServer.metricsRegionServer.updateFlushTime(endTime - startTime); - } regionServer.compactSplitThread.requestRegionsMerge(regionA, regionB, forcible, masterSystemTime, RpcServer.getRequestUser()); return MergeRegionsResponse.newBuilder().build(); @@ -1850,11 +1812,6 @@ public class RSRpcServices implements HBaseRPCErrorHandler, return ReplicateWALEntryResponse.newBuilder().build(); } catch (IOException ie) { throw new ServiceException(ie); - } finally { - if (regionServer.metricsRegionServer != null) { - regionServer.metricsRegionServer.updateReplay( - EnvironmentEdgeManager.currentTime() - before); - } } } @@ -1936,12 +1893,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, + "Replicas are auto-split when their primary is split."); } LOG.info("Splitting " + region.getRegionInfo().getRegionNameAsString()); - long startTime = EnvironmentEdgeManager.currentTime(); FlushResult flushResult = region.flush(true); - if (flushResult.isFlushSucceeded()) { - long endTime = 
EnvironmentEdgeManager.currentTime(); - regionServer.metricsRegionServer.updateFlushTime(endTime - startTime); - } byte[] splitPoint = null; if (request.hasSplitPoint()) { splitPoint = request.getSplitPoint().toByteArray(); @@ -2120,9 +2072,6 @@ public class RSRpcServices implements HBaseRPCErrorHandler, } catch (IOException ie) { throw new ServiceException(ie); } finally { - if (regionServer.metricsRegionServer != null) { - regionServer.metricsRegionServer.updateGet(EnvironmentEdgeManager.currentTime() - before); - } if (quota != null) { quota.close(); } @@ -2171,7 +2120,6 @@ public class RSRpcServices implements HBaseRPCErrorHandler, if (region.getCoprocessorHost() != null) { region.getCoprocessorHost().postGet(get, results); } - region.metricsUpdateForGet(results); return Result.create(results, get.isCheckExistenceOnly() ? !results.isEmpty() : null, stale); } @@ -2217,7 +2165,6 @@ public class RSRpcServices implements HBaseRPCErrorHandler, region = getRegion(regionAction.getRegion()); quota = getQuotaManager().checkQuota(region, regionAction.getActionList()); } catch (IOException e) { - rpcServer.getMetrics().exception(e); regionActionResultBuilder.setException(ResponseConverter.buildException(e)); responseBuilder.addRegionActionResult(regionActionResultBuilder.build()); continue; // For this region it's a failure. @@ -2248,7 +2195,6 @@ public class RSRpcServices implements HBaseRPCErrorHandler, processed = Boolean.TRUE; } } catch (IOException e) { - rpcServer.getMetrics().exception(e); // As it's atomic, we may expect it's a global failure. regionActionResultBuilder.setException(ResponseConverter.buildException(e)); } @@ -2705,10 +2651,6 @@ public class RSRpcServices implements HBaseRPCErrorHandler, } region.updateReadRequestsCount(i); long responseCellSize = context != null ? 
context.getResponseCellSize() : 0; - region.getMetrics().updateScanNext(responseCellSize); - if (regionServer.metricsRegionServer != null) { - regionServer.metricsRegionServer.updateScannerNext(responseCellSize); - } } finally { region.closeRegionOperation(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java index 6d87057..470731a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java @@ -205,8 +205,6 @@ public interface Region extends ConfigurationObserver { /** @return the number of failed checkAndMutate guards */ long getCheckAndMutateChecksFailed(); - /** @return the MetricsRegion for this region */ - MetricsRegion getMetrics(); /** @return the block distribution for all Stores managed by this region */ HDFSBlocksDistribution getHDFSBlocksDistribution(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitRequest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitRequest.java index 91a5f37..016482b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitRequest.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitRequest.java @@ -60,7 +60,6 @@ class SplitRequest implements Runnable { private void doSplitting(User user) { boolean success = false; - server.metricsRegionServer.incrSplitRequest(); long startTime = EnvironmentEdgeManager.currentTime(); SplitTransactionImpl st = new SplitTransactionImpl(parent, midKey); try { @@ -130,9 +129,7 @@ class SplitRequest implements Runnable { releaseTableLock(); long endTime = EnvironmentEdgeManager.currentTime(); // Update regionserver metrics with the split transaction total running time - server.metricsRegionServer.updateSplitTime(endTime - startTime); if (success) { - server.metricsRegionServer.incrSplitSuccess(); // Log success LOG.info("Region split, hbase:meta updated, and report to master. Parent=" + parent.getRegionInfo().getRegionNameAsString() + ", new regions: " diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWAL.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWAL.java deleted file mode 100644 index 24fd940..0000000 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWAL.java +++ /dev/null @@ -1,75 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.hbase.regionserver.wal; - -import com.google.common.annotations.VisibleForTesting; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.CompatibilitySingletonFactory; -import org.apache.hadoop.util.StringUtils; - -/** - * Class used to push numbers about the WAL into the metrics subsystem. This will take a - * single function call and turn it into multiple manipulations of the hadoop metrics system. - */ -@InterfaceAudience.Private -public class MetricsWAL extends WALActionsListener.Base { - private static final Log LOG = LogFactory.getLog(MetricsWAL.class); - - private final MetricsWALSource source; - - public MetricsWAL() { - this(CompatibilitySingletonFactory.getInstance(MetricsWALSource.class)); - } - - @VisibleForTesting - MetricsWAL(MetricsWALSource s) { - this.source = s; - } - - @Override - public void postSync(final long timeInNanos, final int handlerSyncs) { - source.incrementSyncTime(timeInNanos/1000000L); - } - - @Override - public void postAppend(final long size, final long time) { - source.incrementAppendCount(); - source.incrementAppendTime(time); - source.incrementAppendSize(size); - - if (time > 1000) { - source.incrementSlowAppendCount(); - LOG.warn(String.format("%s took %d ms appending an edit to wal; len~=%s", - Thread.currentThread().getName(), - time, - StringUtils.humanReadableInt(size))); - } - } - - @Override - public void logRollRequested(boolean underReplicated) { - source.incrementLogRollRequested(); - if (underReplicated) { - source.incrementLowReplicationLogRoll(); - } - } -} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWALEditsReplay.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWALEditsReplay.java deleted file mode 100644 index b6b1d08..0000000 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWALEditsReplay.java +++ /dev/null @@ -1,59 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.regionserver.wal; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.CompatibilitySingletonFactory; - -/** - * Class used to push numbers about WAL edits replay into the metrics subsystem. This will take a - * single function call and turn it into multiple manipulations of the hadoop metrics system. 
- */ -@InterfaceAudience.Private -public class MetricsWALEditsReplay { - private static final Log LOG = LogFactory.getLog(MetricsWALEditsReplay.class); - - private final MetricsEditsReplaySource source; - - public MetricsWALEditsReplay() { - source = CompatibilitySingletonFactory.getInstance(MetricsEditsReplaySource.class); - } - - /** - * Add the time a replay command took - */ - void updateReplayTime(long time) { - source.updateReplayTime(time); - } - - /** - * Add the batch size of each replay - */ - void updateReplayBatchSize(long size) { - source.updateReplayDataSize(size); - } - - /** - * Add the payload data size of each replay - */ - void updateReplayDataSize(long size) { - } -} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEditsReplaySink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEditsReplaySink.java index 9d5e052..fee42fa 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEditsReplaySink.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEditsReplaySink.java @@ -67,7 +67,6 @@ public class WALEditsReplaySink { private final Configuration conf; private final HConnection conn; private final TableName tableName; - private final MetricsWALEditsReplay metrics; private final AtomicLong totalReplayedEdits = new AtomicLong(); private final boolean skipErrors; private final int replayTimeout; @@ -83,7 +82,6 @@ public class WALEditsReplaySink { public WALEditsReplaySink(Configuration conf, TableName tableName, HConnection conn) throws IOException { this.conf = conf; - this.metrics = new MetricsWALEditsReplay(); this.conn = conn; this.tableName = tableName; this.skipErrors = conf.getBoolean(HConstants.HREGION_EDITS_REPLAY_SKIP_ERRORS, @@ -145,9 +143,6 @@ public class WALEditsReplaySink { LOG.debug("number of rows:" + entries.size() + " are sent by batch! 
spent " + endTime + "(ms)!"); - metrics.updateReplayTime(endTime); - metrics.updateReplayBatchSize(batchSize); - this.totalReplayedEdits.addAndGet(batchSize); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationEndpoint.java index ac1257f..1aa2c36 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationEndpoint.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationEndpoint.java @@ -28,7 +28,6 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.TableDescriptors; import org.apache.hadoop.hbase.wal.WAL.Entry; -import org.apache.hadoop.hbase.replication.regionserver.MetricsSource; import com.google.common.util.concurrent.Service; @@ -57,7 +56,6 @@ public interface ReplicationEndpoint extends Service { private final ReplicationPeer replicationPeer; private final String peerId; private final UUID clusterId; - private final MetricsSource metrics; @InterfaceAudience.Private public Context( @@ -67,7 +65,6 @@ public interface ReplicationEndpoint extends Service { final String peerId, final UUID clusterId, final ReplicationPeer replicationPeer, - final MetricsSource metrics, final TableDescriptors tableDescriptors) { this.peerConfig = peerConfig; this.conf = conf; @@ -75,7 +72,6 @@ public interface ReplicationEndpoint extends Service { this.clusterId = clusterId; this.peerId = peerId; this.replicationPeer = replicationPeer; - this.metrics = metrics; this.tableDescriptors = tableDescriptors; } public Configuration getConfiguration() { @@ -96,9 +92,6 @@ public interface ReplicationEndpoint extends Service { public ReplicationPeer getReplicationPeer() { return replicationPeer; } - public MetricsSource getMetrics() { - return metrics; - } public TableDescriptors getTableDescriptors() { return tableDescriptors; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java index 70cc420..7dd9032 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java @@ -81,8 +81,6 @@ public class HBaseInterClusterReplicationEndpoint extends HBaseReplicationEndpoi private int maxRetriesMultiplier; // Socket timeouts require even bolder actions since we don't want to DDOS private int socketTimeoutMultiplier; - //Metrics for this source - private MetricsSource metrics; // Handles connecting to peer region servers private ReplicationSinkManager replicationSinkMgr; private boolean peersSelected = false; @@ -107,7 +105,6 @@ public class HBaseInterClusterReplicationEndpoint extends HBaseReplicationEndpoi this.conn = (HConnection) ConnectionFactory.createConnection(this.conf); this.sleepForRetries = this.conf.getLong("replication.source.sleepforretries", 1000); - this.metrics = context.getMetrics(); // ReplicationQueueInfo parses the peerId out of the znode for us this.replicationSinkMgr = new ReplicationSinkManager(conn, ctx.getPeerId(), this, this.conf); // per sink thread pool @@ -249,14 +246,11 @@ public class HBaseInterClusterReplicationEndpoint extends HBaseReplicationEndpoi // if 
we had any exceptions, try again throw iox; } - // update metrics - this.metrics.setAgeOfLastShippedOp(entries.get(entries.size() - 1).getKey().getWriteTime(), - walGroupId); return true; } catch (IOException ioe) { // Didn't ship anything, but must still age the last time we did - this.metrics.refreshAgeOfLastShippedOp(walGroupId); + if (ioe instanceof RemoteException) { ioe = ((RemoteException) ioe).unwrapRemoteException(); LOG.warn("Can't replicate because of an error on the remote cluster: ", ioe); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSink.java deleted file mode 100644 index f308daf..0000000 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSink.java +++ /dev/null @@ -1,102 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.replication.regionserver; - -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.CompatibilitySingletonFactory; - -/** - * This class is for maintaining the various replication statistics for a sink and publishing them - * through the metrics interfaces. - */ -@InterfaceAudience.Private -public class MetricsSink { - - private long lastTimestampForAge = System.currentTimeMillis(); - private final MetricsReplicationSinkSource mss; - - public MetricsSink() { - mss = - CompatibilitySingletonFactory.getInstance(MetricsReplicationSourceFactory.class).getSink(); - } - - /** - * Set the age of the last applied operation - * - * @param timestamp The timestamp of the last operation applied. - * @return the age that was set - */ - public long setAgeOfLastAppliedOp(long timestamp) { - long age = 0; - if (lastTimestampForAge != timestamp) { - lastTimestampForAge = timestamp; - age = System.currentTimeMillis() - lastTimestampForAge; - } - mss.setLastAppliedOpAge(age); - return age; - } - - /** - * Refreshing the age makes sure the value returned is the actual one and - * not the one set at replication time - * @return refreshed age - */ - public long refreshAgeOfLastAppliedOp() { - return setAgeOfLastAppliedOp(lastTimestampForAge); - } - - /** - * Convenience method to change metrics when a batch of operations are applied. - * - * @param batchSize - */ - public void applyBatch(long batchSize) { - mss.incrAppliedBatches(1); - mss.incrAppliedOps(batchSize); - } - - /** - * Convenience method to change metrics when a batch of operations are applied.
- * - * @param batchSize total number of mutations that are applied/replicated - * @param hfileSize total number of hfiles that are applied/replicated - */ - public void applyBatch(long batchSize, long hfileSize) { - applyBatch(batchSize); - mss.incrAppliedHFiles(hfileSize); - } - - /** - * Get the Age of Last Applied Op - * @return ageOfLastAppliedOp - */ - public long getAgeOfLastAppliedOp() { - return mss.getLastAppliedOpAge(); - } - - /** - * Get the TimeStampOfLastAppliedOp. If no replication Op applied yet, the value is the timestamp - * at which hbase instance starts - * @return timeStampsOfLastAppliedOp; - */ - public long getTimeStampOfLastAppliedOp() { - return this.lastTimestampForAge; - } - -} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSource.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSource.java deleted file mode 100644 index 9687af7..0000000 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSource.java +++ /dev/null @@ -1,228 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.replication.regionserver; - -import java.util.HashMap; -import java.util.Map; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.CompatibilitySingletonFactory; -import org.apache.hadoop.hbase.HBaseInterfaceAudience; -import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; - -/** - * This class is for maintaining the various replication statistics for a source and publishing them - * through the metrics interfaces. 
- */ -@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.REPLICATION) -public class MetricsSource { - - private static final Log LOG = LogFactory.getLog(MetricsSource.class); - - // tracks last shipped timestamp for each wal group - private Map lastTimeStamps = new HashMap(); - private int lastQueueSize = 0; - private long lastHFileRefsQueueSize = 0; - private String id; - - private final MetricsReplicationSourceSource singleSourceSource; - private final MetricsReplicationSourceSource globalSourceSource; - - - /** - * Constructor used to register the metrics - * - * @param id Name of the source this class is monitoring - */ - public MetricsSource(String id) { - this.id = id; - singleSourceSource = - CompatibilitySingletonFactory.getInstance(MetricsReplicationSourceFactory.class) - .getSource(id); - globalSourceSource = CompatibilitySingletonFactory.getInstance(MetricsReplicationSourceFactory.class).getGlobalSource(); - } - - /** - * Set the age of the last edit that was shipped - * @param timestamp write time of the edit - * @param walGroup which group we are setting - */ - public void setAgeOfLastShippedOp(long timestamp, String walGroup) { - long age = EnvironmentEdgeManager.currentTime() - timestamp; - singleSourceSource.setLastShippedAge(age); - globalSourceSource.setLastShippedAge(age); - this.lastTimeStamps.put(walGroup, timestamp); - } - - /** - * Convenience method to use the last given timestamp to refresh the age of the last edit. Used - * when replication fails and need to keep that metric accurate. - * @param walGroupId id of the group to update - */ - public void refreshAgeOfLastShippedOp(String walGroupId) { - Long lastTimestamp = this.lastTimeStamps.get(walGroupId); - if (lastTimestamp == null) { - this.lastTimeStamps.put(walGroupId, 0L); - lastTimestamp = 0L; - } - if (lastTimestamp > 0) { - setAgeOfLastShippedOp(lastTimestamp, walGroupId); - } - } - - /** - * Set the size of the log queue - * - * @param size the size. - */ - public void setSizeOfLogQueue(int size) { - singleSourceSource.setSizeOfLogQueue(size); - globalSourceSource.incrSizeOfLogQueue(size - lastQueueSize); - lastQueueSize = size; - } - - /** - * Add on the number of log edits read - * - * @param delta the number of log edits read. - */ - private void incrLogEditsRead(long delta) { - singleSourceSource.incrLogReadInEdits(delta); - globalSourceSource.incrLogReadInEdits(delta); - } - - /** Increment the number of log edits read by one. */ - public void incrLogEditsRead() { - incrLogEditsRead(1); - } - - /** - * Add on the number of log edits filtered - * - * @param delta the number filtered. - */ - public void incrLogEditsFiltered(long delta) { - singleSourceSource.incrLogEditsFiltered(delta); - globalSourceSource.incrLogEditsFiltered(delta); - } - - /** The number of log edits filtered out. */ - public void incrLogEditsFiltered() { - incrLogEditsFiltered(1); - } - - /** - * Convenience method to apply changes to metrics due to shipping a batch of logs. - * - * @param batchSize the size of the batch that was shipped to sinks. - */ - public void shipBatch(long batchSize, int sizeInKB) { - singleSourceSource.incrBatchesShipped(1); - globalSourceSource.incrBatchesShipped(1); - - singleSourceSource.incrOpsShipped(batchSize); - globalSourceSource.incrOpsShipped(batchSize); - - singleSourceSource.incrShippedKBs(sizeInKB); - globalSourceSource.incrShippedKBs(sizeInKB); - } - - /** - * Convenience method to apply changes to metrics due to shipping a batch of logs.
- * - * @param batchSize the size of the batch that was shipped to sinks. - * @param hfiles total number of hfiles shipped to sinks. - */ - public void shipBatch(long batchSize, int sizeInKB, long hfiles) { - shipBatch(batchSize, sizeInKB); - singleSourceSource.incrHFilesShipped(hfiles); - globalSourceSource.incrHFilesShipped(hfiles); - } - - /** increase the byte number read by source from log file */ - public void incrLogReadInBytes(long readInBytes) { - singleSourceSource.incrLogReadInBytes(readInBytes); - globalSourceSource.incrLogReadInBytes(readInBytes); - } - - /** Removes all metrics about this Source. */ - public void clear() { - singleSourceSource.clear(); - globalSourceSource.decrSizeOfLogQueue(lastQueueSize); - globalSourceSource.decrSizeOfHFileRefsQueue(lastHFileRefsQueueSize); - lastTimeStamps.clear(); - lastQueueSize = 0; - lastHFileRefsQueueSize = 0; - } - - /** - * Get AgeOfLastShippedOp - * @return AgeOfLastShippedOp - */ - public Long getAgeOfLastShippedOp() { - return singleSourceSource.getLastShippedAge(); - } - - /** - * Get the sizeOfLogQueue - * @return sizeOfLogQueue - */ - public int getSizeOfLogQueue() { - return this.lastQueueSize; - } - - /** - * Get the timeStampsOfLastShippedOp, if there are multiple groups, return the latest one - * @return lastTimestampForAge - */ - public long getTimeStampOfLastShippedOp() { - long lastTimestamp = 0L; - for (long ts : lastTimeStamps.values()) { - if (ts > lastTimestamp) { - lastTimestamp = ts; - } - } - return lastTimestamp; - } - - /** - * Get the slave peer ID - * @return peerID - */ - public String getPeerID() { - return id; - } - - public void incrSizeOfHFileRefsQueue(long size) { - singleSourceSource.incrSizeOfHFileRefsQueue(size); - globalSourceSource.incrSizeOfHFileRefsQueue(size); - lastHFileRefsQueueSize = size; - } - - public void decrSizeOfHFileRefsQueue(int size) { - singleSourceSource.decrSizeOfHFileRefsQueue(size); - globalSourceSource.decrSizeOfHFileRefsQueue(size); - lastHFileRefsQueueSize -= size; - if (lastHFileRefsQueueSize < 0) { - lastHFileRefsQueueSize = 0; - } - } -} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.java index 30153f8..e43b79e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.java @@ -289,8 +289,6 @@ public class RegionReplicaReplicationEndpoint extends HBaseReplicationEndpoint { entryBuffers.appendEntry(entry); } outputSink.flush(); // make sure everything is flushed - ctx.getMetrics().incrLogEditsFiltered( - outputSink.getSkippedEditsCounter().getAndSet(0)); return true; } catch (InterruptedException e) { Thread.currentThread().interrupt(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java index d2a0776..2cb1b50 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java @@ -242,9 +242,6 @@ public class Replication extends WALActionsListener.Base implements throw new IOException(e); } this.replicationSink = new 
ReplicationSink(this.conf, this.server); - this.scheduleThreadPool.scheduleAtFixedRate( - new ReplicationStatisticsThread(this.replicationSink, this.replicationManager), - statsThreadPeriod, statsThreadPeriod, TimeUnit.SECONDS); } } @@ -368,34 +365,6 @@ public class Replication extends WALActionsListener.Base implements } } - /* - * Statistics thread. Periodically prints the cache statistics to the log. - */ - static class ReplicationStatisticsThread extends Thread { - - private final ReplicationSink replicationSink; - private final ReplicationSourceManager replicationManager; - - public ReplicationStatisticsThread(final ReplicationSink replicationSink, - final ReplicationSourceManager replicationManager) { - super("ReplicationStatisticsThread"); - this.replicationManager = replicationManager; - this.replicationSink = replicationSink; - } - - @Override - public void run() { - printStats(this.replicationManager.getStats()); - printStats(this.replicationSink.getStats()); - } - - private void printStats(String stats) { - if (!stats.isEmpty()) { - LOG.info(stats); - } - } - } - @Override public ReplicationLoad refreshAndGetReplicationLoad() { if (this.replicationLoad == null) { @@ -409,15 +378,6 @@ public class Replication extends WALActionsListener.Base implements private void buildReplicationLoad() { // get source List sources = this.replicationManager.getSources(); - List sourceMetricsList = new ArrayList(); - - for (ReplicationSourceInterface source : sources) { - if (source instanceof ReplicationSource) { - sourceMetricsList.add(((ReplicationSource) source).getSourceMetrics()); - } - } - // get sink - MetricsSink sinkMetrics = this.replicationSink.getSinkMetrics(); - this.replicationLoad.buildReplicationLoad(sourceMetricsList, sinkMetrics); + this.replicationLoad.buildReplicationLoad(); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationLoad.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationLoad.java index 8dd42bc..6cbd471 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationLoad.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationLoad.java @@ -36,9 +36,6 @@ public class ReplicationLoad { // Empty load instance. 
public static final ReplicationLoad EMPTY_REPLICATIONLOAD = new ReplicationLoad(); - private List sourceMetricsList; - private MetricsSink sinkMetrics; - private List replicationLoadSourceList; private ClusterStatusProtos.ReplicationLoadSink replicationLoadSink; @@ -49,53 +46,17 @@ public class ReplicationLoad { /** * buildReplicationLoad - * @param srMetricsList - * @param skMetrics */ - public void buildReplicationLoad(final List srMetricsList, - final MetricsSink skMetrics) { - this.sourceMetricsList = srMetricsList; - this.sinkMetrics = skMetrics; + public void buildReplicationLoad() { // build the SinkLoad ClusterStatusProtos.ReplicationLoadSink.Builder rLoadSinkBuild = ClusterStatusProtos.ReplicationLoadSink.newBuilder(); - rLoadSinkBuild.setAgeOfLastAppliedOp(sinkMetrics.getAgeOfLastAppliedOp()); - rLoadSinkBuild.setTimeStampsOfLastAppliedOp(sinkMetrics.getTimeStampOfLastAppliedOp()); this.replicationLoadSink = rLoadSinkBuild.build(); // build the SourceLoad List this.replicationLoadSourceList = new ArrayList(); - for (MetricsSource sm : this.sourceMetricsList) { - long ageOfLastShippedOp = sm.getAgeOfLastShippedOp(); - int sizeOfLogQueue = sm.getSizeOfLogQueue(); - long timeStampOfLastShippedOp = sm.getTimeStampOfLastShippedOp(); - long replicationLag; - long timePassedAfterLastShippedOp = - EnvironmentEdgeManager.currentTime() - timeStampOfLastShippedOp; - if (sizeOfLogQueue != 0) { - // err on the large side - replicationLag = Math.max(ageOfLastShippedOp, timePassedAfterLastShippedOp); - } else if (timePassedAfterLastShippedOp < 2 * ageOfLastShippedOp) { - replicationLag = ageOfLastShippedOp; // last shipped happened recently - } else { - // last shipped may have happened last night, - // so NO real lag although ageOfLastShippedOp is non-zero - replicationLag = 0; - } - - ClusterStatusProtos.ReplicationLoadSource.Builder rLoadSourceBuild = - ClusterStatusProtos.ReplicationLoadSource.newBuilder(); - rLoadSourceBuild.setPeerID(sm.getPeerID()); - rLoadSourceBuild.setAgeOfLastShippedOp(ageOfLastShippedOp); - rLoadSourceBuild.setSizeOfLogQueue(sizeOfLogQueue); - rLoadSourceBuild.setTimeStampOfLastShippedOp(timeStampOfLastShippedOp); - rLoadSourceBuild.setReplicationLag(replicationLag); - - this.replicationLoadSourceList.add(rLoadSourceBuild.build()); - } - } /** @@ -103,7 +64,6 @@ public class ReplicationLoad { * @return a string that contains sourceReplicationLoad information */ public String sourceToString() { - if (this.sourceMetricsList == null) return null; StringBuilder sb = new StringBuilder(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java index 9e7b3af..68724dc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java @@ -81,7 +81,6 @@ public class ReplicationSink { // Volatile because of note in here -- look for double-checked locking: // http://www.oracle.com/technetwork/articles/javase/bloch-effective-08-qa-140880.html private volatile Connection sharedHtableCon; - private final MetricsSink metrics; private final AtomicLong totalReplicatedEdits = new AtomicLong(); private final Object sharedHtableConLock = new Object(); // Number of hfiles that we successfully replicated @@ -99,7+98,6 @@ public class ReplicationSink { throws IOException { this.conf =
HBaseConfiguration.create(conf); decorateConf(); - this.metrics = new MetricsSink(); String className = conf.get("hbase.replication.source.fs.conf.provider", @@ -222,8 +220,6 @@ public class ReplicationSink { } int size = entries.size(); - this.metrics.setAgeOfLastAppliedOp(entries.get(size - 1).getKey().getWriteTime()); - this.metrics.applyBatch(size + hfilesReplicated, hfilesReplicated); this.totalReplicatedEdits.addAndGet(totalReplicated); } catch (IOException ex) { LOG.error("Unable to accept edit because:", ex); @@ -404,16 +400,7 @@ public class ReplicationSink { * of the last edit that was applied */ public String getStats() { - return this.totalReplicatedEdits.get() == 0 ? "" : "Sink: " + - "age in ms of last applied edit: " + this.metrics.refreshAgeOfLastAppliedOp() + - ", total replicated edits: " + this.totalReplicatedEdits; + return ""; } - /** - * Get replication Sink Metrics - * @return MetricsSink - */ - public MetricsSink getSinkMetrics() { - return this.metrics; - } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java index 868ddee..9de0c72 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java @@ -125,8 +125,6 @@ public class ReplicationSource extends Thread private int maxRetriesMultiplier; // Indicates if this particular source is running private volatile boolean sourceRunning = false; - // Metrics for this source - private MetricsSource metrics; //WARN threshold for the number of queued logs, defaults to 2 private int logQueueWarnThreshold; // ReplicationEndpoint which will handle the actual replication @@ -156,8 +154,7 @@ public class ReplicationSource extends Thread public void init(final Configuration conf, final FileSystem fs, final ReplicationSourceManager manager, final ReplicationQueues replicationQueues, final ReplicationPeers replicationPeers, final Stoppable stopper, - final String peerClusterZnode, final UUID clusterId, ReplicationEndpoint replicationEndpoint, - final MetricsSource metrics) + final String peerClusterZnode, final UUID clusterId, ReplicationEndpoint replicationEndpoint) throws IOException { this.stopper = stopper; this.conf = HBaseConfiguration.create(conf); @@ -177,7 +174,7 @@ public class ReplicationSource extends Thread this.replicationPeers = replicationPeers; this.manager = manager; this.fs = fs; - this.metrics = metrics; + this.clusterId = clusterId; this.peerClusterZnode = peerClusterZnode; @@ -219,7 +216,6 @@ public class ReplicationSource extends Thread } queue.put(log); int queueSize = logQueueSize.incrementAndGet(); - this.metrics.setSizeOfLogQueue(queueSize); // This will log a warning for each new log that gets created above the warn threshold if (queue.size() > this.logQueueWarnThreshold) { LOG.warn("WAL group " + logPrefix + " queue size: " + queueSize @@ -242,7 +238,6 @@ public class ReplicationSource extends Thread if (tableCFMap.containsKey(tableName) && (tableCfs == null || tableCfs.contains(Bytes.toString(family)))) { this.replicationQueues.addHFileRefs(peerId, files); - metrics.incrSizeOfHFileRefsQueue(files.size()); } else { LOG.debug("HFiles will not be replicated belonging to the table " + tableName + " family " + Bytes.toString(family) + " to peer id " + peerId); @@ -251,13 +246,11 @@ public class 
ReplicationSource extends Thread // user has explicitly not defined any table cfs for replication, means replicate all the // data this.replicationQueues.addHFileRefs(peerId, files); - metrics.incrSizeOfHFileRefsQueue(files.size()); } } private void uninitialize() { LOG.debug("Source exiting " + this.peerId); - metrics.clear(); if (replicationEndpoint.state() == Service.State.STARTING || replicationEndpoint.state() == Service.State.RUNNING) { replicationEndpoint.stopAndWait(); @@ -476,14 +469,6 @@ public class ReplicationSource extends Thread return sb.toString(); } - /** - * Get Replication Source Metrics - * @return sourceMetrics - */ - public MetricsSource getSourceMetrics() { - return this.metrics; - } - public class ReplicationSourceWorkerThread extends Thread { ReplicationSource source; String walGroupId; @@ -640,7 +625,6 @@ public class ReplicationSource extends Thread sleepMultiplier = 1; // if there was nothing to ship and it's not an error // set "ageOfLastShippedOp" to to indicate that we're current - metrics.setAgeOfLastShippedOp(System.currentTimeMillis(), walGroupId); } if (sleepForRetries("Nothing to replicate", sleepMultiplier)) { sleepMultiplier++; @@ -689,7 +673,6 @@ public class ReplicationSource extends Thread long positionBeforeRead = this.repLogReader.getPosition(); WAL.Entry entry = this.repLogReader.readNextAndSetPosition(); while (entry != null) { - metrics.incrLogEditsRead(); seenEntries++; // don't replicate if the log entries have already been consumed by the cluster @@ -710,8 +693,6 @@ public class ReplicationSource extends Thread currentNbOperations += countDistinctRowKeys(edit); entries.add(entry); currentSize += entry.getEdit().heapSize(); - } else { - metrics.incrLogEditsFiltered(); } } // Stop if too many entries or too big @@ -727,7 +708,6 @@ public class ReplicationSource extends Thread break; } } - metrics.incrLogReadInBytes(this.repLogReader.getPosition() - positionBeforeRead); if (currentWALisBeingWrittenTo) { return false; } @@ -752,7 +732,6 @@ public class ReplicationSource extends Thread for (int j = 0; j < stores.size(); j++) { List storeFileList = stores.get(j).getStoreFileList(); manager.cleanUpHFileRefs(peerId, storeFileList); - metrics.decrSizeOfHFileRefsQueue(storeFileList.size()); } } } @@ -767,7 +746,6 @@ public class ReplicationSource extends Thread if (this.currentPath == null) { this.currentPath = queue.poll(sleepForRetries, TimeUnit.MILLISECONDS); int queueSize = logQueueSize.decrementAndGet(); - metrics.setSizeOfLogQueue(queueSize); if (this.currentPath != null) { // For recovered queue: must use peerClusterZnode since peerId is a parsed value manager.cleanOldLogs(this.currentPath.getName(), peerClusterZnode, @@ -1005,9 +983,6 @@ public class ReplicationSource extends Thread totalReplicatedEdits.addAndGet(entries.size()); totalReplicatedOperations.addAndGet(currentNbOperations); // FIXME check relationship between wal group and overall - metrics.shipBatch(currentNbOperations, currentSize / 1024, currentNbHFiles); - metrics.setAgeOfLastShippedOp(entries.get(entries.size() - 1).getKey().getWriteTime(), - walGroupId); if (LOG.isTraceEnabled()) { LOG.trace("Replicated " + totalReplicatedEdits + " entries in total, or " + totalReplicatedOperations + " operations in " diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceInterface.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceInterface.java index 7f4a9f7..35736aa 100644 --- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceInterface.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceInterface.java @@ -54,8 +54,8 @@ public interface ReplicationSourceInterface { public void init(final Configuration conf, final FileSystem fs, final ReplicationSourceManager manager, final ReplicationQueues replicationQueues, final ReplicationPeers replicationPeers, final Stoppable stopper, - final String peerClusterZnode, final UUID clusterId, ReplicationEndpoint replicationEndpoint, - final MetricsSource metrics) throws IOException; + final String peerClusterZnode, final UUID clusterId, + ReplicationEndpoint replicationEndpoint) throws IOException; /** * Add a log to the list of logs to replicate diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java index 9ff4b2d..4e8e1aa 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java @@ -487,14 +487,13 @@ public class ReplicationSourceManager implements ReplicationListener { throw new IOException(e); } - MetricsSource metrics = new MetricsSource(peerId); // init replication source src.init(conf, fs, manager, replicationQueues, replicationPeers, server, peerId, - clusterId, replicationEndpoint, metrics); + clusterId, replicationEndpoint); // init replication endpoint replicationEndpoint.init(new ReplicationEndpoint.Context(replicationPeer.getConfiguration(), - fs, peerConfig, peerId, clusterId, replicationPeer, metrics, tableDescriptors)); + fs, peerConfig, peerId, clusterId, replicationPeer, tableDescriptors)); return src; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java index 9dbeed7..7945050 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java @@ -118,7 +118,6 @@ import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService.Block import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.HRegionFileSystem; import org.apache.hadoop.hbase.regionserver.StoreFileInfo; -import org.apache.hadoop.hbase.regionserver.wal.MetricsWAL; import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener; import org.apache.hadoop.hbase.security.AccessDeniedException; import org.apache.hadoop.hbase.security.UserProvider; @@ -1333,7 +1332,7 @@ public class HBaseFsck extends Configured implements Closeable { Configuration confForWAL = new Configuration(c); confForWAL.set(HConstants.HBASE_DIR, rootdir.toString()); WAL wal = (new WALFactory(confForWAL, - Collections.singletonList(new MetricsWAL()), + Collections.emptyList(), "hbck-meta-recovery-" + RandomStringUtils.randomNumeric(8))). 
getWAL(metaHRI.getEncodedNameAsBytes(), metaHRI.getTable().getNamespace()); HRegion meta = HRegion.createHRegion(metaHRI, rootdir, c, metaDescriptor, wal); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/RegionGroupingProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/RegionGroupingProvider.java index 2885428..170290b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/RegionGroupingProvider.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/RegionGroupingProvider.java @@ -36,7 +36,6 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.regionserver.wal.FSHLog; -import org.apache.hadoop.hbase.regionserver.wal.MetricsWAL; // imports for classes still in regionserver.wal import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener; @@ -171,7 +170,7 @@ class RegionGroupingProvider implements WALProvider { if (isMeta) { hlogPrefix = this.providerId; // don't watch log roll for meta - listeners = Collections. singletonList(new MetricsWAL()); + listeners = Collections.emptyList(); } else { hlogPrefix = groupName; listeners = this.listeners; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java index 0317b66..e5faea7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java @@ -41,7 +41,6 @@ import org.apache.hadoop.hbase.util.CancelableProgressable; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; // imports for things that haven't moved from regionserver.wal yet. 
-import org.apache.hadoop.hbase.regionserver.wal.MetricsWAL; import org.apache.hadoop.hbase.regionserver.wal.ProtobufLogReader; import org.apache.hadoop.hbase.regionserver.wal.SequenceFileLogReader; import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener; @@ -237,7 +236,7 @@ public class WALFactory { WALProvider metaProvider = this.metaProvider.get(); if (null == metaProvider) { final WALProvider temp = getProvider(META_WAL_PROVIDER, DEFAULT_META_WAL_PROVIDER, - Collections.singletonList(new MetricsWAL()), + Collections.emptyList(), DefaultWALProvider.META_WAL_PROVIDER_ID); if (this.metaProvider.compareAndSet(null, temp)) { metaProvider = temp; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java index 71a3344..da6a25e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java @@ -102,7 +102,6 @@ import org.apache.hadoop.hbase.regionserver.Region; import org.apache.hadoop.hbase.regionserver.RegionScanner; import org.apache.hadoop.hbase.regionserver.RegionServerServices; import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException; -import org.apache.hadoop.hbase.regionserver.wal.MetricsWAL; import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.security.visibility.VisibilityLabelsCache; @@ -2276,7 +2275,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { Configuration confForWAL = new Configuration(conf); confForWAL.set(HConstants.HBASE_DIR, rootDir.toString()); return (new WALFactory(confForWAL, - Collections.singletonList(new MetricsWAL()), + Collections.emptyList(), "hregion-" + RandomStringUtils.randomNumeric(8))). 
getWAL(hri.getEncodedNameAsBytes(), hri.getTable().getNamespace()); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java index c2d273b..1ba24f8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java @@ -38,7 +38,6 @@ import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.hadoop.hbase.regionserver.Region; import org.apache.hadoop.hbase.security.User; -import org.apache.hadoop.hbase.test.MetricsAssertHelper; import org.apache.hadoop.hbase.util.JVMClusterUtil; import org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread; import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread; @@ -88,9 +87,6 @@ public class MiniHBaseCluster extends HBaseCluster { super(conf); conf.set(HConstants.MASTER_PORT, "0"); - // Hadoop 2 - CompatibilityFactory.getInstance(MetricsAssertHelper.class).init(); - init(numMasters, numRegionServers, masterClass, regionserverClass); this.initialClusterStatus = getClusterStatus(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestStochasticBalancerJmxMetrics.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestStochasticBalancerJmxMetrics.java deleted file mode 100644 index 1f149bf..0000000 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestStochasticBalancerJmxMetrics.java +++ /dev/null @@ -1,277 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.hbase; - -import static org.junit.Assert.assertTrue; - -import java.io.IOException; -import java.util.HashSet; -import java.util.Hashtable; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.Random; -import java.util.Set; - -import javax.management.MBeanAttributeInfo; -import javax.management.MBeanInfo; -import javax.management.MBeanServerConnection; -import javax.management.ObjectInstance; -import javax.management.ObjectName; -import javax.management.remote.JMXConnector; -import javax.management.remote.JMXConnectorFactory; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; -import org.apache.hadoop.hbase.master.balancer.BalancerTestBase; -import org.apache.hadoop.hbase.master.balancer.StochasticLoadBalancer; -import org.apache.hadoop.hbase.testclassification.MediumTests; -import org.apache.hadoop.hbase.testclassification.MiscTests; -import org.apache.hadoop.hbase.util.Threads; -import org.apache.hadoop.net.DNSToSwitchMapping; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.FixMethodOrder; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.runners.MethodSorters; - -@Category({ MiscTests.class, MediumTests.class }) -@FixMethodOrder(MethodSorters.NAME_ASCENDING) -public class TestStochasticBalancerJmxMetrics extends BalancerTestBase { - private static final Log LOG = LogFactory.getLog(TestStochasticBalancerJmxMetrics.class); - private static HBaseTestingUtility UTIL = new HBaseTestingUtility(); - private static int connectorPort = 61120; - private static StochasticLoadBalancer loadBalancer; - /** - * a simple cluster for testing JMX. - */ - private static int[] mockCluster_ensemble = new int[] { 0, 1, 2, 3 }; - private static int[] mockCluster_pertable_1 = new int[] { 0, 1, 2 }; - private static int[] mockCluster_pertable_2 = new int[] { 3, 1, 1 }; - private static int[] mockCluster_pertable_namespace = new int[] { 1, 3, 1 }; - - private static final String TABLE_NAME_1 = "Table1"; - private static final String TABLE_NAME_2 = "Table2"; - private static final String TABLE_NAME_NAMESPACE = "hbase:namespace"; - - private static Configuration conf = null; - - /** - * Setup the environment for the test. - */ - @BeforeClass - public static void setupBeforeClass() throws Exception { - - conf = UTIL.getConfiguration(); - - conf.setClass("hbase.util.ip.to.rack.determiner", MockMapping.class, DNSToSwitchMapping.class); - conf.setFloat("hbase.master.balancer.stochastic.maxMovePercent", 0.75f); - conf.setFloat("hbase.regions.slop", 0.0f); - conf.set(CoprocessorHost.REGIONSERVER_COPROCESSOR_CONF_KEY, JMXListener.class.getName()); - Random rand = new Random(); - for (int i = 0; i < 10; i++) { - do { - int sign = i % 2 == 0 ? 1 : -1; - connectorPort += sign * rand.nextInt(100); - } while (!HBaseTestingUtility.available(connectorPort)); - try { - conf.setInt("regionserver.rmi.registry.port", connectorPort); - - UTIL.startMiniCluster(); - break; - } catch (Exception e) { - LOG.debug("Encountered exception when starting cluster. 
Trying port " + connectorPort, e); - try { - // this is to avoid "IllegalStateException: A mini-cluster is already running" - UTIL.shutdownMiniCluster(); - } catch (Exception ex) { - LOG.debug("Encountered exception shutting down cluster", ex); - } - } - } - } - - @AfterClass - public static void tearDownAfterClass() throws Exception { - UTIL.shutdownMiniCluster(); - } - - /** - * In Ensemble mode, there should be only one ensemble table - */ - @Test (timeout=60000) - public void testJmxMetrics_EnsembleMode() throws Exception { - loadBalancer = new StochasticLoadBalancer(); - - conf.setBoolean(HConstants.HBASE_MASTER_LOADBALANCE_BYTABLE, false); - loadBalancer.setConf(conf); - - TableName tableName = TableName.valueOf(HConstants.ENSEMBLE_TABLE_NAME); - Map> clusterState = mockClusterServers(mockCluster_ensemble); - loadBalancer.balanceCluster(tableName, clusterState); - - String[] tableNames = new String[] { tableName.getNameAsString() }; - String[] functionNames = loadBalancer.getCostFunctionNames(); - Set jmxMetrics = readJmxMetricsWithRetry(); - Set expectedMetrics = getExpectedJmxMetrics(tableNames, functionNames); - - // printMetrics(jmxMetrics, "existing metrics in ensemble mode"); - // printMetrics(expectedMetrics, "expected metrics in ensemble mode"); - - // assert that every expected is in the JMX - for (String expected : expectedMetrics) { - assertTrue("Metric " + expected + " can not be found in JMX in ensemble mode.", - jmxMetrics.contains(expected)); - } - } - - /** - * In per-table mode, each table has a set of metrics - */ - @Test (timeout=60000) - public void testJmxMetrics_PerTableMode() throws Exception { - loadBalancer = new StochasticLoadBalancer(); - - conf.setBoolean(HConstants.HBASE_MASTER_LOADBALANCE_BYTABLE, true); - loadBalancer.setConf(conf); - - // NOTE the size is normally set in setClusterStatus, for test purpose, we set it manually - // Tables: hbase:namespace, table1, table2 - // Functions: costFunctions, overall - String[] functionNames = loadBalancer.getCostFunctionNames(); - loadBalancer.updateMetricsSize(3 * (functionNames.length + 1)); - - // table 1 - TableName tableName = TableName.valueOf(TABLE_NAME_1); - Map> clusterState = mockClusterServers(mockCluster_pertable_1); - loadBalancer.balanceCluster(tableName, clusterState); - - // table 2 - tableName = TableName.valueOf(TABLE_NAME_2); - clusterState = mockClusterServers(mockCluster_pertable_2); - loadBalancer.balanceCluster(tableName, clusterState); - - // table hbase:namespace - tableName = TableName.valueOf(TABLE_NAME_NAMESPACE); - clusterState = mockClusterServers(mockCluster_pertable_namespace); - loadBalancer.balanceCluster(tableName, clusterState); - - String[] tableNames = new String[] { TABLE_NAME_1, TABLE_NAME_2, TABLE_NAME_NAMESPACE }; - Set jmxMetrics = readJmxMetricsWithRetry(); - Set expectedMetrics = getExpectedJmxMetrics(tableNames, functionNames); - - // printMetrics(jmxMetrics, "existing metrics in per-table mode"); - // printMetrics(expectedMetrics, "expected metrics in per-table mode"); - - // assert that every expected is in the JMX - for (String expected : expectedMetrics) { - assertTrue("Metric " + expected + " can not be found in JMX in per-table mode.", - jmxMetrics.contains(expected)); - } - } - - private Set readJmxMetricsWithRetry() throws IOException { - final int count = 0; - for (int i = 0; i < 10; i++) { - Set metrics = readJmxMetrics(); - if (metrics != null) return metrics; - LOG.warn("Failed to get jmxmetrics... 
sleeping, retrying; " + i + " of " + count + " times"); - Threads.sleep(1000); - } - return null; - } - - /** - * Read the attributes from Hadoop->HBase->Master->Balancer in JMX - * @throws IOException - */ - private Set readJmxMetrics() throws IOException { - JMXConnector connector = null; - ObjectName target = null; - MBeanServerConnection mb = null; - try { - connector = - JMXConnectorFactory.connect(JMXListener.buildJMXServiceURL(connectorPort, connectorPort)); - mb = connector.getMBeanServerConnection(); - - Hashtable pairs = new Hashtable<>(); - pairs.put("service", "HBase"); - pairs.put("name", "Master"); - pairs.put("sub", "Balancer"); - target = new ObjectName("Hadoop", pairs); - MBeanInfo beanInfo = mb.getMBeanInfo(target); - - Set existingAttrs = new HashSet(); - for (MBeanAttributeInfo attrInfo : beanInfo.getAttributes()) { - existingAttrs.add(attrInfo.getName()); - } - return existingAttrs; - } catch (Exception e) { - LOG.warn("Failed to get bean!!! " + target, e); - if (mb != null) { - Set instances = mb.queryMBeans(null, null); - Iterator iterator = instances.iterator(); - System.out.println("MBean Found:"); - while (iterator.hasNext()) { - ObjectInstance instance = iterator.next(); - System.out.println("Class Name: " + instance.getClassName()); - System.out.println("Object Name: " + instance.getObjectName()); - } - } - } finally { - if (connector != null) { - try { - connector.close(); - } catch (Exception e) { - e.printStackTrace(); - } - } - } - return null; - } - - /** - * Given the tables and functions, return metrics names that should exist in JMX - */ - private Set getExpectedJmxMetrics(String[] tableNames, String[] functionNames) { - Set ret = new HashSet(); - - for (String tableName : tableNames) { - ret.add(StochasticLoadBalancer.composeAttributeName(tableName, "Overall")); - for (String functionName : functionNames) { - String metricsName = StochasticLoadBalancer.composeAttributeName(tableName, functionName); - ret.add(metricsName); - } - } - - return ret; - } - - private static void printMetrics(Set metrics, String info) { - if (null != info) LOG.info("++++ ------ " + info + " ------"); - - LOG.info("++++ metrics count = " + metrics.size()); - for (String str : metrics) { - LOG.info(" ++++ " + str); - } - } -} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiRespectsLimits.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiRespectsLimits.java index 04c592e..e98600b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiRespectsLimits.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiRespectsLimits.java @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.client; -import org.apache.hadoop.hbase.CompatibilityFactory; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; @@ -27,9 +26,7 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.Waiter; import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; import org.apache.hadoop.hbase.ipc.RpcServerInterface; -import org.apache.hadoop.hbase.metrics.BaseSource; import org.apache.hadoop.hbase.regionserver.HRegionServer; -import org.apache.hadoop.hbase.test.MetricsAssertHelper; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; @@ -51,8 +48,6 @@ import static 
junit.framework.TestCase.assertEquals; @Category({MediumTests.class, ClientTests.class}) public class TestMultiRespectsLimits { private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); - private static final MetricsAssertHelper METRICS_ASSERT = - CompatibilityFactory.getInstance(MetricsAssertHelper.class); private final static byte[] FAMILY = Bytes.toBytes("D"); public static final int MAX_SIZE = 500; @@ -94,20 +89,9 @@ public class TestMultiRespectsLimits { } RpcServerInterface rpcServer = TEST_UTIL.getHBaseCluster().getRegionServer(0).getRpcServer(); - BaseSource s = rpcServer.getMetrics().getMetricsSource(); - long startingExceptions = METRICS_ASSERT.getCounter("exceptions", s); - long startingMultiExceptions = METRICS_ASSERT.getCounter("exceptions.multiResponseTooLarge", s); Result[] results = t.get(gets); assertEquals(MAX_SIZE, results.length); - - // Cells from TEST_UTIL.loadTable have a length of 27. - // Multiplying by less than that gives an easy lower bound on size. - // However in reality each kv is being reported as much higher than that. - METRICS_ASSERT.assertCounterGt("exceptions", - startingExceptions + ((MAX_SIZE * 25) / MAX_SIZE), s); - METRICS_ASSERT.assertCounterGt("exceptions.multiResponseTooLarge", - startingMultiExceptions + ((MAX_SIZE * 25) / MAX_SIZE), s); } @Test @@ -121,11 +105,6 @@ public class TestMultiRespectsLimits { Table t = TEST_UTIL.getConnection().getTable(name); final HRegionServer regionServer = TEST_UTIL.getHBaseCluster().getRegionServer(0); - RpcServerInterface rpcServer = regionServer.getRpcServer(); - BaseSource s = rpcServer.getMetrics().getMetricsSource(); - long startingExceptions = METRICS_ASSERT.getCounter("exceptions", s); - long startingMultiExceptions = METRICS_ASSERT.getCounter("exceptions.multiResponseTooLarge", s); - byte[] row = Bytes.toBytes("TEST"); byte[][] cols = new byte[][]{ Bytes.toBytes("0"), // Get this @@ -142,7 +121,7 @@ public class TestMultiRespectsLimits { byte[] value = new byte[MAX_SIZE - 100]; ThreadLocalRandom.current().nextBytes(value); - for (byte[] col:cols) { + for (byte[] col : cols) { Put p = new Put(row); p.addImmutable(FAMILY, col, value); t.put(p); @@ -170,8 +149,5 @@ public class TestMultiRespectsLimits { Result[] results = t.get(gets); assertEquals(2, results.length); - METRICS_ASSERT.assertCounterGt("exceptions", startingExceptions, s); - METRICS_ASSERT.assertCounterGt("exceptions.multiResponseTooLarge", - startingMultiExceptions, s); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerWrapperStub.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerWrapperStub.java deleted file mode 100644 index 6241f8e..0000000 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerWrapperStub.java +++ /dev/null @@ -1,51 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.ipc; - -public class MetricsHBaseServerWrapperStub implements MetricsHBaseServerWrapper{ - @Override - public long getTotalQueueSize() { - return 101; - } - - @Override - public int getGeneralQueueLength() { - return 102; - } - - @Override - public int getReplicationQueueLength() { - return 103; - } - - @Override - public int getPriorityQueueLength() { - return 104; - } - - @Override - public int getNumOpenConnections() { - return 105; - } - - @Override - public int getActiveRpcHandlerCount() { - return 106; - } -} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestRpcMetrics.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestRpcMetrics.java deleted file mode 100644 index 9f1b63a..0000000 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestRpcMetrics.java +++ /dev/null @@ -1,142 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.hbase.ipc; - -import org.apache.hadoop.hbase.CompatibilityFactory; -import org.apache.hadoop.hbase.NotServingRegionException; -import org.apache.hadoop.hbase.RegionTooBusyException; -import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.exceptions.OutOfOrderScannerNextException; -import org.apache.hadoop.hbase.exceptions.RegionMovedException; -import org.apache.hadoop.hbase.testclassification.RPCTests; -import org.apache.hadoop.hbase.testclassification.SmallTests; -import org.apache.hadoop.hbase.test.MetricsAssertHelper; -import org.junit.Test; -import org.junit.experimental.categories.Category; - - -import static org.junit.Assert.*; - -@Category({RPCTests.class, SmallTests.class}) -public class TestRpcMetrics { - public MetricsAssertHelper HELPER = CompatibilityFactory.getInstance(MetricsAssertHelper.class); - - @Test - public void testFactory() { - MetricsHBaseServer masterMetrics = new MetricsHBaseServer("HMaster", new MetricsHBaseServerWrapperStub()); - MetricsHBaseServerSource masterSource = masterMetrics.getMetricsSource(); - - MetricsHBaseServer rsMetrics = new MetricsHBaseServer("HRegionServer", new MetricsHBaseServerWrapperStub()); - MetricsHBaseServerSource rsSource = rsMetrics.getMetricsSource(); - - - assertEquals("master", masterSource.getMetricsContext()); - assertEquals("regionserver", rsSource.getMetricsContext()); - - assertEquals("Master,sub=IPC", masterSource.getMetricsJmxContext()); - assertEquals("RegionServer,sub=IPC", rsSource.getMetricsJmxContext()); - - assertEquals("Master", masterSource.getMetricsName()); - assertEquals("RegionServer", rsSource.getMetricsName()); - } - - /** - * This test makes sure that the numbers from a MetricsHBaseServerWrapper are correctly exported - * to the hadoop metrics2 system. - */ - @Test - public void testWrapperSource() { - MetricsHBaseServer mrpc = new MetricsHBaseServer("HMaster", new MetricsHBaseServerWrapperStub()); - MetricsHBaseServerSource serverSource = mrpc.getMetricsSource(); - HELPER.assertGauge("queueSize", 101, serverSource); - HELPER.assertGauge("numCallsInGeneralQueue", 102, serverSource); - HELPER.assertGauge("numCallsInReplicationQueue", 103, serverSource); - HELPER.assertGauge("numCallsInPriorityQueue", 104, serverSource); - HELPER.assertGauge("numOpenConnections", 105, serverSource); - HELPER.assertGauge("numActiveHandler", 106, serverSource); - } - - /** - * Test to make sure that all the actively called methods on MetricsHBaseServer work.
- */ - @Test - public void testSourceMethods() { - MetricsHBaseServer mrpc = new MetricsHBaseServer("HMaster", new MetricsHBaseServerWrapperStub()); - MetricsHBaseServerSource serverSource = mrpc.getMetricsSource(); - - for (int i=0; i < 12; i++) { - mrpc.authenticationFailure(); - } - for (int i=0; i < 13; i++) { - mrpc.authenticationSuccess(); - } - HELPER.assertCounter("authenticationFailures", 12, serverSource); - HELPER.assertCounter("authenticationSuccesses", 13, serverSource); - - - - for (int i=0; i < 14; i++) { - mrpc.authorizationSuccess(); - } - for (int i=0; i < 15; i++) { - mrpc.authorizationFailure(); - } - HELPER.assertCounter("authorizationSuccesses", 14, serverSource); - HELPER.assertCounter("authorizationFailures", 15, serverSource); - - - mrpc.dequeuedCall(100); - mrpc.processedCall(101); - mrpc.totalCall(102); - HELPER.assertCounter("queueCallTime_NumOps", 1, serverSource); - HELPER.assertCounter("processCallTime_NumOps", 1, serverSource); - HELPER.assertCounter("totalCallTime_NumOps", 1, serverSource); - - mrpc.sentBytes(103); - mrpc.sentBytes(103); - mrpc.sentBytes(103); - - mrpc.receivedBytes(104); - mrpc.receivedBytes(104); - - HELPER.assertCounter("sentBytes", 309, serverSource); - HELPER.assertCounter("receivedBytes", 208, serverSource); - - mrpc.receivedRequest(105); - mrpc.sentResponse(106); - HELPER.assertCounter("requestSize_NumOps", 1, serverSource); - HELPER.assertCounter("responseSize_NumOps", 1, serverSource); - - mrpc.exception(null); - HELPER.assertCounter("exceptions", 1, serverSource); - - mrpc.exception(new RegionMovedException(ServerName.parseServerName("localhost:60020"), 100)); - mrpc.exception(new RegionTooBusyException()); - mrpc.exception(new OutOfOrderScannerNextException()); - mrpc.exception(new NotServingRegionException()); - HELPER.assertCounter("exceptions.RegionMovedException", 1, serverSource); - HELPER.assertCounter("exceptions.RegionTooBusyException", 1, serverSource); - HELPER.assertCounter("exceptions.OutOfOrderScannerNextException", 1, serverSource); - HELPER.assertCounter("exceptions.NotServingRegionException", 1, serverSource); - HELPER.assertCounter("exceptions", 5, serverSource); - } - -} - diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java index 337eeac..6c729e7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java @@ -45,14 +45,12 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.CategoryBasedTimeout; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; -import org.apache.hadoop.hbase.CompatibilitySingletonFactory; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HDFSBlocksDistribution; import org.apache.hadoop.hbase.HTableDescriptor; -import org.apache.hadoop.hbase.HadoopShims; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.PerformanceEvaluation; import org.apache.hadoop.hbase.TableName; @@ -89,7 +87,9 @@ import org.apache.hadoop.mapreduce.Job; import org.apache.hadoop.mapreduce.Mapper; import org.apache.hadoop.mapreduce.RecordWriter; import org.apache.hadoop.mapreduce.TaskAttemptContext; +import 
org.apache.hadoop.mapreduce.TaskAttemptID; import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat; +import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl; import org.junit.Ignore; import org.junit.Rule; import org.junit.Test; @@ -97,8 +97,6 @@ import org.junit.experimental.categories.Category; import org.junit.rules.TestRule; import org.mockito.Mockito; -import com.google.common.collect.Lists; - /** * Simple test for {@link CellSortReducer} and {@link HFileOutputFormat2}. * Sets up and runs a mapreduce job that writes hfile output. @@ -232,10 +230,9 @@ public class TestHFileOutputFormat2 { private TaskAttemptContext createTestTaskAttemptContext(final Job job) throws Exception { - HadoopShims hadoop = CompatibilitySingletonFactory.getInstance(HadoopShims.class); - TaskAttemptContext context = hadoop.createTestTaskAttemptContext( - job, "attempt_201402131733_0001_m_000000_0"); - return context; + + return new TaskAttemptContextImpl(job.getConfiguration(), + TaskAttemptID.forName("attempt_201402131733_0001_m_000000_0")); } /* diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterMetrics.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterMetrics.java deleted file mode 100644 index 4c672b0..0000000 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterMetrics.java +++ /dev/null @@ -1,138 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
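The TestHFileOutputFormat2 hunk above replaces the removed HadoopShims indirection with direct construction of a Hadoop 2 task attempt context. A short sketch of the direct construction, assuming only that the Hadoop 2 mapreduce classes are on the classpath; the wrapper class here is illustrative:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapreduce.TaskAttemptContext;
    import org.apache.hadoop.mapreduce.TaskAttemptID;
    import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl;

    public class TaskAttemptContextSketch {
      // With the hadoop1 compatibility layer gone, tests can name an attempt
      // ID directly and build the context from a plain Configuration.
      static TaskAttemptContext createTestContext(Configuration conf) {
        TaskAttemptID id = TaskAttemptID.forName("attempt_201402131733_0001_m_000000_0");
        return new TaskAttemptContextImpl(conf, id);
      }

      public static void main(String[] args) {
        TaskAttemptContext ctx = createTestContext(new Configuration());
        System.out.println(ctx.getTaskAttemptID()); // attempt_201402131733_0001_m_000000_0
      }
    }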
- */ -package org.apache.hadoop.hbase.master; - -import java.io.IOException; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.CompatibilityFactory; -import org.apache.hadoop.hbase.CoordinatedStateManager; -import org.apache.hadoop.hbase.HBaseTestingUtility; -import org.apache.hadoop.hbase.MiniHBaseCluster; -import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos; -import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos; -import org.apache.hadoop.hbase.test.MetricsAssertHelper; -import org.apache.hadoop.hbase.testclassification.MasterTests; -import org.apache.hadoop.hbase.testclassification.MediumTests; -import org.apache.zookeeper.KeeperException; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.Test; -import org.junit.experimental.categories.Category; - -@Category({MasterTests.class, MediumTests.class}) -public class TestMasterMetrics { - - private static final Log LOG = LogFactory.getLog(TestMasterMetrics.class); - private static final MetricsAssertHelper metricsHelper = CompatibilityFactory - .getInstance(MetricsAssertHelper.class); - - private static MiniHBaseCluster cluster; - private static HMaster master; - private static HBaseTestingUtility TEST_UTIL; - - public static class MyMaster extends HMaster { - public MyMaster(Configuration conf, CoordinatedStateManager cp) throws IOException, - KeeperException, InterruptedException { - super(conf, cp); - } - - @Override - protected void tryRegionServerReport( - long reportStartTime, long reportEndTime) { - // do nothing - } - } - - @BeforeClass - public static void startCluster() throws Exception { - LOG.info("Starting cluster"); - TEST_UTIL = new HBaseTestingUtility(); - TEST_UTIL.startMiniCluster(1, 1, 1, null, MyMaster.class, null); - cluster = TEST_UTIL.getHBaseCluster(); - LOG.info("Waiting for active/ready master"); - cluster.waitForActiveAndReadyMaster(); - master = cluster.getMaster(); - } - - @AfterClass - public static void after() throws Exception { - if (TEST_UTIL != null) { - TEST_UTIL.shutdownMiniCluster(); - } - } - - @Test(timeout = 300000) - public void testClusterRequests() throws Exception { - - // sending fake request to master to see how metric value has changed - RegionServerStatusProtos.RegionServerReportRequest.Builder request = - RegionServerStatusProtos.RegionServerReportRequest.newBuilder(); - ServerName serverName = cluster.getMaster(0).getServerName(); - request.setServer(ProtobufUtil.toServerName(serverName)); - - MetricsMasterSource masterSource = master.getMasterMetrics().getMetricsSource(); - ClusterStatusProtos.ServerLoad sl = ClusterStatusProtos.ServerLoad.newBuilder() - .setTotalNumberOfRequests(10000) - .build(); - masterSource.init(); - request.setLoad(sl); - master.getMasterRpcServices().regionServerReport(null, request.build()); - - metricsHelper.assertCounter("cluster_requests", 10000, masterSource); - - sl = ClusterStatusProtos.ServerLoad.newBuilder() - .setTotalNumberOfRequests(15000) - .build(); - request.setLoad(sl); - master.getMasterRpcServices().regionServerReport(null, request.build()); - - metricsHelper.assertCounter("cluster_requests", 15000, masterSource); - - master.getMasterRpcServices().regionServerReport(null, request.build()); - - metricsHelper.assertCounter("cluster_requests", 15000, masterSource); - 
master.stopMaster(); - } - - @Test - public void testDefaultMasterMetrics() throws Exception { - MetricsMasterSource masterSource = master.getMasterMetrics().getMetricsSource(); - metricsHelper.assertGauge( "numRegionServers", 2, masterSource); - metricsHelper.assertGauge( "averageLoad", 2, masterSource); - metricsHelper.assertGauge( "numDeadRegionServers", 0, masterSource); - - metricsHelper.assertGauge("masterStartTime", master.getMasterStartTime(), masterSource); - metricsHelper.assertGauge("masterActiveTime", master.getMasterActiveTime(), masterSource); - - metricsHelper.assertTag("isActiveMaster", "true", masterSource); - metricsHelper.assertTag("serverName", master.getServerName().toString(), masterSource); - metricsHelper.assertTag("clusterId", master.getClusterId(), masterSource); - metricsHelper.assertTag("zookeeperQuorum", master.getZooKeeper().getQuorum(), masterSource); - } - - @Test - public void testDefaultMasterProcMetrics() throws Exception { - MetricsMasterProcSource masterSource = master.getMasterMetrics().getMetricsProcSource(); - metricsHelper.assertGauge("numMasterWALs", master.getNumWALFiles(), masterSource); - } -} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterMetricsWrapper.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterMetricsWrapper.java deleted file mode 100644 index 2df4ac9..0000000 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterMetricsWrapper.java +++ /dev/null @@ -1,78 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
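The master metrics tests above all funnel their expectations through MetricsAssertHelper lookups by metric name. Reduced to a self-contained sketch, the core of that assert-by-name idea looks like the following; the map-backed source is an illustrative stand-in for a real metrics source:

    import java.util.HashMap;
    import java.util.Map;

    public class CounterAssertSketch {
      // Stand-in for a metrics source: metric names mapped to current values.
      static final Map<String, Long> SOURCE = new HashMap<>();

      static void assertCounter(String name, long expected) {
        long actual = SOURCE.getOrDefault(name, 0L);
        if (actual != expected) {
          throw new AssertionError(name + ": expected " + expected + " but was " + actual);
        }
      }

      public static void main(String[] args) {
        SOURCE.put("cluster_requests", 10000L);
        assertCounter("cluster_requests", 10000L); // passes
      }
    }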
- */ -package org.apache.hadoop.hbase.master; - -import static org.junit.Assert.*; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.HBaseTestingUtility; -import org.apache.hadoop.hbase.testclassification.MasterTests; -import org.apache.hadoop.hbase.testclassification.MediumTests; -import org.apache.hadoop.hbase.util.Threads; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.Test; -import org.junit.experimental.categories.Category; - -@Category({MasterTests.class, MediumTests.class}) -public class TestMasterMetricsWrapper { - private static final Log LOG = LogFactory.getLog(TestMasterMetricsWrapper.class); - - private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); - - @BeforeClass - public static void setup() throws Exception { - TEST_UTIL.startMiniCluster(1, 5); - } - - @AfterClass - public static void teardown() throws Exception { - TEST_UTIL.shutdownMiniCluster(); - } - - @Test (timeout = 30000) - public void testInfo() { - HMaster master = TEST_UTIL.getHBaseCluster().getMaster(); - MetricsMasterWrapperImpl info = new MetricsMasterWrapperImpl(master); - assertEquals(master.getAverageLoad(), info.getAverageLoad(), 0); - assertEquals(master.getClusterId(), info.getClusterId()); - assertEquals(master.getMasterActiveTime(), info.getActiveTime()); - assertEquals(master.getMasterStartTime(), info.getStartTime()); - assertEquals(master.getMasterCoprocessors().length, info.getCoprocessors().length); - assertEquals(master.getServerManager().getOnlineServersList().size(), info.getNumRegionServers()); - assertEquals(5, info.getNumRegionServers()); - - String zkServers = info.getZookeeperQuorum(); - assertEquals(zkServers.split(",").length, TEST_UTIL.getZkCluster().getZooKeeperServerNum()); - - final int index = 3; - LOG.info("Stopping " + TEST_UTIL.getMiniHBaseCluster().getRegionServer(index)); - TEST_UTIL.getMiniHBaseCluster().stopRegionServer(index, false); - TEST_UTIL.getMiniHBaseCluster().waitOnRegionServer(index); - // We stopped the regionserver, but it can take a while for the master to notice it, so hang here - // until it does... then move forward to see if the metrics wrapper notices.
- while (TEST_UTIL.getHBaseCluster().getMaster().getServerManager().getOnlineServers().size() != - 4) { - Threads.sleep(10); - } - assertEquals(4, info.getNumRegionServers()); - assertEquals(1, info.getNumDeadRegionServers()); - assertEquals(1, info.getNumWALFiles()); - } -} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterStatusServlet.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterStatusServlet.java index baac248..74e9d3b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterStatusServlet.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterStatusServlet.java @@ -41,8 +41,6 @@ import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; -import org.apache.hadoop.hbase.regionserver.MetricsRegionServer; -import org.apache.hadoop.hbase.regionserver.MetricsRegionServerWrapperStub; import org.apache.hadoop.hbase.tmpl.master.AssignmentManagerStatusTmpl; import org.apache.hadoop.hbase.tmpl.master.MasterStatusTmpl; import org.junit.Before; @@ -109,10 +107,6 @@ public class TestMasterStatusServlet { Mockito.doReturn(tracker).when(master).getMasterAddressTracker(); Mockito.doReturn(FAKE_HOST).when(tracker).getMasterAddress(); - MetricsRegionServer rms = Mockito.mock(MetricsRegionServer.class); - Mockito.doReturn(new MetricsRegionServerWrapperStub()).when(rms).getRegionServerWrapper(); - Mockito.doReturn(rms).when(master).getRegionServerMetrics(); - // Mock admin admin = Mockito.mock(Admin.class); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotManager.java index 2a97119..ae53c89 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotManager.java @@ -33,7 +33,6 @@ import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.executor.ExecutorService; import org.apache.hadoop.hbase.master.MasterFileSystem; import org.apache.hadoop.hbase.master.MasterServices; -import org.apache.hadoop.hbase.master.MetricsMaster; import org.apache.hadoop.hbase.master.cleaner.HFileCleaner; import org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner; import org.apache.hadoop.hbase.procedure.ProcedureCoordinator; @@ -51,7 +50,6 @@ public class TestSnapshotManager { private static final HBaseTestingUtility UTIL = new HBaseTestingUtility(); MasterServices services = Mockito.mock(MasterServices.class); - MetricsMaster metrics = Mockito.mock(MetricsMaster.class); ProcedureCoordinator coordinator = Mockito.mock(ProcedureCoordinator.class); ExecutorService pool = Mockito.mock(ExecutorService.class); MasterFileSystem mfs = Mockito.mock(MasterFileSystem.class); @@ -75,7 +73,7 @@ public class TestSnapshotManager { Mockito.when(services.getMasterFileSystem()).thenReturn(mfs); Mockito.when(mfs.getFileSystem()).thenReturn(fs); Mockito.when(mfs.getRootDir()).thenReturn(UTIL.getDataTestDir()); - return new SnapshotManager(services, metrics, coordinator, pool); + return new SnapshotManager(services, coordinator, pool); } @Test diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/SimpleMasterProcedureManager.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/SimpleMasterProcedureManager.java index 62afaa9..b724e40 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/SimpleMasterProcedureManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/SimpleMasterProcedureManager.java @@ -29,7 +29,6 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.errorhandling.ForeignException; import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher; import org.apache.hadoop.hbase.master.MasterServices; -import org.apache.hadoop.hbase.master.MetricsMaster; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription; import org.apache.zookeeper.KeeperException; @@ -56,7 +55,7 @@ public class SimpleMasterProcedureManager extends MasterProcedureManager { } @Override - public void initialize(MasterServices master, MetricsMaster metricsMaster) + public void initialize(MasterServices master) throws KeeperException, IOException, UnsupportedOperationException { this.master = master; this.done = false; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperStub.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperStub.java deleted file mode 100644 index 0d93284..0000000 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperStub.java +++ /dev/null @@ -1,362 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
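TestSnapshotManager and TestMasterStatusServlet above show the knock-on effect on mock wiring: with the MetricsMaster collaborator gone, one fewer mock is needed. The Mockito style those tests use, in a self-contained sketch with an illustrative interface:

    import static org.mockito.Mockito.mock;
    import static org.mockito.Mockito.when;

    public class MockWiringSketch {
      interface MasterFileSystemInfo {
        String getRootDir();
      }

      public static void main(String[] args) {
        // Mock only the collaborator calls the code under test will make.
        MasterFileSystemInfo mfs = mock(MasterFileSystemInfo.class);
        when(mfs.getRootDir()).thenReturn("/tmp/hbase-data-test");
        System.out.println(mfs.getRootDir()); // prints the stubbed path
      }
    }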
- */ - -package org.apache.hadoop.hbase.regionserver; - -public class MetricsRegionServerWrapperStub implements MetricsRegionServerWrapper { - - @Override - public String getServerName() { - return "test"; - } - - @Override - public String getClusterId() { - return "tClusterId"; - } - - @Override - public String getZookeeperQuorum() { - return "zk"; - } - - @Override - public String getCoprocessors() { - return "co-process"; - } - - @Override - public long getStartCode() { - return 100; - } - - @Override - public long getNumOnlineRegions() { - return 101; - } - - @Override - public long getNumStores() { - return 2; - } - - @Override - public long getNumStoreFiles() { - return 300; - } - - @Override - public long getMemstoreSize() { - return 1025; - } - - @Override - public long getStoreFileSize() { - return 1900; - } - - @Override - public double getRequestsPerSecond() { - return 0; - } - - @Override - public long getTotalRequestCount() { - return 899; - } - - @Override - public long getReadRequestsCount() { - return 997; - } - - @Override - public long getWriteRequestsCount() { - return 707; - } - - @Override - public long getCheckAndMutateChecksFailed() { - return 401; - } - - @Override - public long getCheckAndMutateChecksPassed() { - return 405; - } - - @Override - public long getStoreFileIndexSize() { - return 406; - } - - @Override - public long getTotalStaticIndexSize() { - return 407; - } - - @Override - public long getTotalStaticBloomSize() { - return 408; - } - - @Override - public long getNumMutationsWithoutWAL() { - return 409; - } - - @Override - public long getDataInMemoryWithoutWAL() { - return 410; - } - - @Override - public double getPercentFileLocal() { - return 99; - } - - @Override - public double getPercentFileLocalSecondaryRegions() { - return 99; - } - - @Override - public int getCompactionQueueSize() { - return 411; - } - - @Override - public int getSmallCompactionQueueSize() { - return 0; - } - - @Override - public int getLargeCompactionQueueSize() { - return 0; - } - - @Override - public int getFlushQueueSize() { - return 412; - } - - @Override - public long getBlockCacheFreeSize() { - return 413; - } - - @Override - public long getBlockCacheCount() { - return 414; - } - - @Override - public long getBlockCacheSize() { - return 415; - } - - @Override - public long getBlockCacheHitCount() { - return 416; - } - - @Override - public long getBlockCachePrimaryHitCount() { - return 422; - } - - @Override - public long getBlockCacheMissCount() { - return 417; - } - - @Override - public long getBlockCachePrimaryMissCount() { - return 421; - } - - @Override - public long getBlockCacheEvictedCount() { - return 418; - } - - @Override - public long getBlockCachePrimaryEvictedCount() { - return 420; - } - - @Override - public double getBlockCacheHitPercent() { - return 98; - } - - @Override - public double getBlockCacheHitCachingPercent() { - return 97; - } - - @Override - public long getBlockCacheFailedInsertions() { - return 36; - } - - @Override - public long getUpdatesBlockedTime() { - return 419; - } - - @Override - public void forceRecompute() { - //IGNORED. 
- } - - @Override - public long getNumWALFiles() { - return 10; - } - - @Override - public long getWALFileSize() { - return 1024000; - } - - @Override - public long getNumWALSlowAppend() { - return 0; - } - - @Override - public long getFlushedCellsCount() { - return 100000000; - } - - @Override - public long getCompactedCellsCount() { - return 10000000; - } - - @Override - public long getMajorCompactedCellsCount() { - return 1000000; - } - - @Override - public long getFlushedCellsSize() { - return 1024000000; - } - - @Override - public long getCompactedCellsSize() { - return 102400000; - } - - @Override - public long getMajorCompactedCellsSize() { - return 10240000; - } - - @Override - public long getHedgedReadOps() { - return 100; - } - - @Override - public long getHedgedReadWins() { - return 10; - } - - @Override - public long getBlockedRequestsCount() { - return 0; - } - - @Override - public int getSplitQueueSize() { - return 0; - } - - @Override - public long getCellsCountCompactedToMob() { - return 20; - } - - @Override - public long getCellsCountCompactedFromMob() { - return 10; - } - - @Override - public long getCellsSizeCompactedToMob() { - return 200; - } - - @Override - public long getCellsSizeCompactedFromMob() { - return 100; - } - - @Override - public long getMobFlushCount() { - return 1; - } - - @Override - public long getMobFlushedCellsCount() { - return 10; - } - - @Override - public long getMobFlushedCellsSize() { - return 1000; - } - - @Override - public long getMobScanCellsCount() { - return 10; - } - - @Override - public long getMobScanCellsSize() { - return 1000; - } - - @Override - public long getMobFileCacheAccessCount() { - return 100; - } - - @Override - public long getMobFileCacheMissCount() { - return 50; - } - - @Override - public long getMobFileCacheEvictedCount() { - return 0; - } - - @Override - public long getMobFileCacheCount() { - return 100; - } - - @Override - public double getMobFileCacheHitPercent() { - return 50; - } -} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapperStub.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapperStub.java deleted file mode 100644 index c43ccc3..0000000 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapperStub.java +++ /dev/null @@ -1,110 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
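The region-level stub that follows feeds tests (TestMetricsRegion, later in this patch) that look up metrics by a flattened key of the form namespace_NS_table_TABLE_region_REGION_metric_NAME. A sketch of that key construction; the helper method is illustrative, though the output format matches the assertions in the deleted tests:

    public class RegionMetricKeySketch {
      static String regionMetricKey(String ns, String table, String region, String metric) {
        return "namespace_" + ns + "_table_" + table
            + "_region_" + region + "_metric_" + metric;
      }

      public static void main(String[] args) {
        System.out.println(
            regionMetricKey("TestNS", "MetricsRegionWrapperStub", "DEADBEEF001", "storeCount"));
        // namespace_TestNS_table_MetricsRegionWrapperStub_region_DEADBEEF001_metric_storeCount
      }
    }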
- */ - -package org.apache.hadoop.hbase.regionserver; - -public class MetricsRegionWrapperStub implements MetricsRegionWrapper { - int replicaid = 0; - - /** - * Replica ID set to 0 - */ - public MetricsRegionWrapperStub() { - this.replicaid = 0; - } - - /** - * Pass in replica ID - */ - public MetricsRegionWrapperStub(int replicaid) { - this.replicaid = replicaid; - } - - @Override - public String getTableName() { - return "MetricsRegionWrapperStub"; - } - - @Override - public String getNamespace() { - return "TestNS"; - } - - @Override - public String getRegionName() { - return "DEADBEEF001"; - } - - @Override - public long getNumStores() { - return 101; - } - - @Override - public long getNumStoreFiles() { - return 102; - } - - @Override - public long getMemstoreSize() { - return 103; - } - - @Override - public long getStoreFileSize() { - return 104; - } - - @Override - public long getReadRequestCount() { - return 105; - } - - @Override - public long getWriteRequestCount() { - return 106; - } - - @Override - public long getNumFilesCompacted() { - return 0; - } - - @Override - public long getNumBytesCompacted() { - return 0; - } - - @Override - public long getNumCompactionsCompleted() { - return 0; - } - - @Override - public int getRegionHashCode() { - return 42; - } - - /** - * Get the replica id of this region. - */ - @Override - public int getReplicaId() { - return replicaid; - } -} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java index 35de488..d906a48 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java @@ -76,7 +76,6 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; -import org.apache.hadoop.hbase.CompatibilitySingletonFactory; import org.apache.hadoop.hbase.DroppedSnapshotException; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseTestCase; @@ -138,13 +137,10 @@ import org.apache.hadoop.hbase.regionserver.TestStore.FaultyFileSystem; import org.apache.hadoop.hbase.regionserver.handler.FinishRegionRecoveringHandler; import org.apache.hadoop.hbase.regionserver.wal.FSHLog; import org.apache.hadoop.hbase.regionserver.wal.HLogKey; -import org.apache.hadoop.hbase.regionserver.wal.MetricsWAL; -import org.apache.hadoop.hbase.regionserver.wal.MetricsWALSource; import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener; import org.apache.hadoop.hbase.regionserver.wal.WALEdit; import org.apache.hadoop.hbase.regionserver.wal.WALUtil; import org.apache.hadoop.hbase.security.User; -import org.apache.hadoop.hbase.test.MetricsAssertHelper; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.VerySlowRegionServerTests; import org.apache.hadoop.hbase.util.Bytes; @@ -217,9 +213,6 @@ public class TestHRegion { protected final byte[] row = Bytes.toBytes("rowA"); protected final byte[] row2 = Bytes.toBytes("rowB"); - protected final MetricsAssertHelper metricsAssertHelper = CompatibilitySingletonFactory - .getInstance(MetricsAssertHelper.class); - @Before public void setup() throws IOException { TEST_UTIL = HBaseTestingUtility.createLocalHTU(); @@ -369,7 +362,7 @@ public class TestHRegion { final Configuration walConf = new Configuration(conf); 
FSUtils.setRootDir(walConf, logDir); return (new WALFactory(walConf, - Collections.singletonList(new MetricsWAL()), callingMethod)) + Collections.emptyList(), callingMethod)) .getWAL(tableName.toBytes(), tableName.getNamespace()); } @@ -1478,11 +1471,7 @@ public class TestHRegion { byte[] qual = Bytes.toBytes("qual"); byte[] val = Bytes.toBytes("val"); this.region = initHRegion(TableName.valueOf(getName()), getName(), CONF, cf); - MetricsWALSource source = CompatibilitySingletonFactory.getInstance(MetricsWALSource.class); try { - long syncs = metricsAssertHelper.getCounter("syncTimeNumOps", source); - metricsAssertHelper.assertCounter("syncTimeNumOps", syncs, source); - LOG.info("First a batch put with all valid puts"); final Put[] puts = new Put[10]; for (int i = 0; i < 10; i++) { @@ -1495,7 +1484,6 @@ public class TestHRegion { for (int i = 0; i < 10; i++) { assertEquals(OperationStatusCode.SUCCESS, codes[i].getOperationStatusCode()); } - metricsAssertHelper.assertCounter("syncTimeNumOps", syncs + 1, source); LOG.info("Next a batch put with one invalid family"); puts[5].addColumn(Bytes.toBytes("BAD_CF"), qual, val); @@ -1506,7 +1494,6 @@ public class TestHRegion { codes[i].getOperationStatusCode()); } - metricsAssertHelper.assertCounter("syncTimeNumOps", syncs + 2, source); } finally { HBaseTestingUtility.closeRegionAndWAL(this.region); this.region = null; @@ -1519,11 +1506,7 @@ public class TestHRegion { byte[] qual = Bytes.toBytes("qual"); byte[] val = Bytes.toBytes("val"); this.region = initHRegion(TableName.valueOf(getName()), getName(), CONF, cf); - MetricsWALSource source = CompatibilitySingletonFactory.getInstance(MetricsWALSource.class); try { - long syncs = metricsAssertHelper.getCounter("syncTimeNumOps", source); - metricsAssertHelper.assertCounter("syncTimeNumOps", syncs, source); - final Put[] puts = new Put[10]; for (int i = 0; i < 10; i++) { puts[i] = new Put(Bytes.toBytes("row_" + i)); @@ -1584,7 +1567,6 @@ public class TestHRegion { rowLock1.release(); rowLock2.release(); rowLock3.release(); - waitForCounter(source, "syncTimeNumOps", syncs + 1); LOG.info("...joining on put thread"); ctx.stop(); @@ -1602,19 +1584,6 @@ public class TestHRegion { } } - private void waitForCounter(MetricsWALSource source, String metricName, long expectedCount) - throws InterruptedException { - long startWait = System.currentTimeMillis(); - long currentCount; - while ((currentCount = metricsAssertHelper.getCounter(metricName, source)) < expectedCount) { - Thread.sleep(100); - if (System.currentTimeMillis() - startWait > 10000) { - fail(String.format("Timed out waiting for '%s' >= '%s', currentCount=%s", metricName, - expectedCount, currentCount)); - } - } - } - @Test public void testBatchPutWithTsSlop() throws Exception { TableName b = TableName.valueOf(getName()); @@ -1627,10 +1596,6 @@ public class TestHRegion { this.region = initHRegion(b, getName(), CONF, cf); try { - MetricsWALSource source = CompatibilitySingletonFactory.getInstance(MetricsWALSource.class); - long syncs = metricsAssertHelper.getCounter("syncTimeNumOps", source); - metricsAssertHelper.assertCounter("syncTimeNumOps", syncs, source); - final Put[] puts = new Put[10]; for (int i = 0; i < 10; i++) { puts[i] = new Put(Bytes.toBytes("row_" + i), Long.MAX_VALUE - 100); @@ -1642,7 +1607,6 @@ public class TestHRegion { for (int i = 0; i < 10; i++) { assertEquals(OperationStatusCode.SANITY_CHECK_FAILURE, codes[i].getOperationStatusCode()); } - metricsAssertHelper.assertCounter("syncTimeNumOps", syncs, source); } finally { 
HBaseTestingUtility.closeRegionAndWAL(this.region); @@ -4916,7 +4880,7 @@ public class TestHRegion { Configuration confForWAL = new Configuration(conf); confForWAL.set(HConstants.HBASE_DIR, rootDir.toString()); return new WALFactory(confForWAL, - Collections.singletonList(new MetricsWAL()), + Collections.emptyList(), "hregion-" + RandomStringUtils.randomNumeric(8)); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegion.java deleted file mode 100644 index e739890..0000000 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegion.java +++ /dev/null @@ -1,70 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.regionserver; - -import org.apache.hadoop.hbase.CompatibilityFactory; -import org.apache.hadoop.hbase.testclassification.RegionServerTests; -import org.apache.hadoop.hbase.testclassification.SmallTests; -import org.apache.hadoop.hbase.test.MetricsAssertHelper; -import org.junit.Test; -import org.junit.experimental.categories.Category; - -@Category({RegionServerTests.class, SmallTests.class}) -public class TestMetricsRegion { - - - public MetricsAssertHelper HELPER = CompatibilityFactory.getInstance(MetricsAssertHelper.class); - - @Test - public void testRegionWrapperMetrics() { - MetricsRegion mr = new MetricsRegion(new MetricsRegionWrapperStub()); - MetricsRegionAggregateSource agg = mr.getSource().getAggregateSource(); - - HELPER.assertGauge( - "namespace_TestNS_table_MetricsRegionWrapperStub_region_DEADBEEF001_metric_storeCount", - 101, agg); - HELPER.assertGauge( - "namespace_TestNS_table_MetricsRegionWrapperStub_region_DEADBEEF001_metric_storeFileCount", - 102, agg); - HELPER.assertGauge( - "namespace_TestNS_table_MetricsRegionWrapperStub_region_DEADBEEF001_metric_memstoreSize", - 103, agg); - HELPER.assertCounter( - "namespace_TestNS_table_MetricsRegionWrapperStub_region_DEADBEEF001_metric_replicaid", - 0, agg); - mr.close(); - - // test region with replica id > 0 - mr = new MetricsRegion(new MetricsRegionWrapperStub(1)); - agg = mr.getSource().getAggregateSource(); - HELPER.assertGauge( - "namespace_TestNS_table_MetricsRegionWrapperStub_region_DEADBEEF001_metric_storeCount", - 101, agg); - HELPER.assertGauge( - "namespace_TestNS_table_MetricsRegionWrapperStub_region_DEADBEEF001_metric_storeFileCount", - 102, agg); - HELPER.assertGauge( - "namespace_TestNS_table_MetricsRegionWrapperStub_region_DEADBEEF001_metric_memstoreSize", - 103, agg); - HELPER.assertCounter( - "namespace_TestNS_table_MetricsRegionWrapperStub_region_DEADBEEF001_metric_replicaid", - 1, agg); - mr.close(); - } -} diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServer.java deleted file mode 100644 index f3ce0bd..0000000 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServer.java +++ /dev/null @@ -1,137 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.regionserver; - -import org.apache.hadoop.hbase.CompatibilityFactory; -import org.apache.hadoop.hbase.testclassification.RegionServerTests; -import org.apache.hadoop.hbase.testclassification.SmallTests; -import org.apache.hadoop.hbase.test.MetricsAssertHelper; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.Test; -import org.junit.experimental.categories.Category; - -import static org.junit.Assert.assertNotNull; - -/** - * Unit test version of rs metrics tests. - */ -@Category({RegionServerTests.class, SmallTests.class}) -public class TestMetricsRegionServer { - public static MetricsAssertHelper HELPER = - CompatibilityFactory.getInstance(MetricsAssertHelper.class); - - private MetricsRegionServerWrapperStub wrapper; - private MetricsRegionServer rsm; - private MetricsRegionServerSource serverSource; - - @BeforeClass - public static void classSetUp() { - HELPER.init(); - } - - @Before - public void setUp() { - wrapper = new MetricsRegionServerWrapperStub(); - rsm = new MetricsRegionServer(wrapper); - serverSource = rsm.getMetricsSource(); - } - - @Test - public void testWrapperSource() { - HELPER.assertTag("serverName", "test", serverSource); - HELPER.assertTag("clusterId", "tClusterId", serverSource); - HELPER.assertTag("zookeeperQuorum", "zk", serverSource); - HELPER.assertGauge("regionServerStartTime", 100, serverSource); - HELPER.assertGauge("regionCount", 101, serverSource); - HELPER.assertGauge("storeCount", 2, serverSource); - HELPER.assertGauge("hlogFileCount", 10, serverSource); - HELPER.assertGauge("hlogFileSize", 1024000, serverSource); - HELPER.assertGauge("storeFileCount", 300, serverSource); - HELPER.assertGauge("memstoreSize", 1025, serverSource); - HELPER.assertGauge("storeFileSize", 1900, serverSource); - HELPER.assertCounter("totalRequestCount", 899, serverSource); - HELPER.assertCounter("readRequestCount", 997, serverSource); - HELPER.assertCounter("writeRequestCount", 707, serverSource); - HELPER.assertCounter("checkMutateFailedCount", 401, serverSource); - HELPER.assertCounter("checkMutatePassedCount", 405, serverSource); - HELPER.assertGauge("storeFileIndexSize", 406, serverSource); - HELPER.assertGauge("staticIndexSize", 407, serverSource); - HELPER.assertGauge("staticBloomSize", 408, serverSource); - 
HELPER.assertGauge("mutationsWithoutWALCount", 409, serverSource); - HELPER.assertGauge("mutationsWithoutWALSize", 410, serverSource); - HELPER.assertGauge("percentFilesLocal", 99, serverSource); - HELPER.assertGauge("percentFilesLocalSecondaryRegions", 99, serverSource); - HELPER.assertGauge("compactionQueueLength", 411, serverSource); - HELPER.assertGauge("flushQueueLength", 412, serverSource); - HELPER.assertGauge("blockCacheFreeSize", 413, serverSource); - HELPER.assertGauge("blockCacheCount", 414, serverSource); - HELPER.assertGauge("blockCacheSize", 415, serverSource); - HELPER.assertCounter("blockCacheHitCount", 416, serverSource); - HELPER.assertCounter("blockCacheMissCount", 417, serverSource); - HELPER.assertCounter("blockCacheEvictionCount", 418, serverSource); - HELPER.assertGauge("blockCacheCountHitPercent", 98, serverSource); - HELPER.assertGauge("blockCacheExpressHitPercent", 97, serverSource); - HELPER.assertCounter("blockCacheFailedInsertionCount", 36, serverSource); - HELPER.assertCounter("updatesBlockedTime", 419, serverSource); - } - - @Test - public void testConstuctor() { - assertNotNull("There should be a hadoop1/hadoop2 metrics source", rsm.getMetricsSource() ); - assertNotNull("The RegionServerMetricsWrapper should be accessable", rsm.getRegionServerWrapper()); - } - - @Test - public void testSlowCount() { - for (int i=0; i < 12; i ++) { - rsm.updateAppend(12); - rsm.updateAppend(1002); - } - for (int i=0; i < 13; i ++) { - rsm.updateDelete(13); - rsm.updateDelete(1003); - } - for (int i=0; i < 14; i ++) { - rsm.updateGet(14); - rsm.updateGet(1004); - } - for (int i=0; i < 15; i ++) { - rsm.updateIncrement(15); - rsm.updateIncrement(1005); - } - for (int i=0; i < 16; i ++) { - rsm.updatePut(16); - rsm.updatePut(1006); - } - - HELPER.assertCounter("appendNumOps", 24, serverSource); - HELPER.assertCounter("deleteNumOps", 26, serverSource); - HELPER.assertCounter("getNumOps", 28, serverSource); - HELPER.assertCounter("incrementNumOps", 30, serverSource); - HELPER.assertCounter("mutateNumOps", 32, serverSource); - - - HELPER.assertCounter("slowAppendCount", 12, serverSource); - HELPER.assertCounter("slowDeleteCount", 13, serverSource); - HELPER.assertCounter("slowGetCount", 14, serverSource); - HELPER.assertCounter("slowIncrementCount", 15, serverSource); - HELPER.assertCounter("slowPutCount", 16, serverSource); - } -} - diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRSStatusServlet.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRSStatusServlet.java index 732df4b..90678ef 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRSStatusServlet.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRSStatusServlet.java @@ -94,10 +94,6 @@ public class TestRSStatusServlet { MasterAddressTracker mat = Mockito.mock(MasterAddressTracker.class); Mockito.doReturn(fakeMasterAddress).when(mat).getMasterAddress(); Mockito.doReturn(mat).when(rs).getMasterAddressTracker(); - - MetricsRegionServer rms = Mockito.mock(MetricsRegionServer.class); - Mockito.doReturn(new MetricsRegionServerWrapperStub()).when(rms).getRegionServerWrapper(); - Mockito.doReturn(rms).when(rs).getRegionServerMetrics(); } @Test diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java deleted file mode 100644 index 79df5e8..0000000 --- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java +++ /dev/null @@ -1,558 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.regionserver; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.*; -import org.apache.hadoop.hbase.client.*; -import org.apache.hadoop.hbase.test.MetricsAssertHelper; -import org.apache.hadoop.hbase.testclassification.MediumTests; -import org.apache.hadoop.hbase.testclassification.RegionServerTests; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.Threads; -import org.apache.log4j.Level; -import org.apache.log4j.Logger; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.Test; -import org.junit.experimental.categories.Category; - -import static org.junit.Assert.*; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; - - -@Category({RegionServerTests.class, MediumTests.class}) -public class TestRegionServerMetrics { - private static MetricsAssertHelper metricsHelper; - - static { - Logger.getLogger("org.apache.hadoop.hbase").setLevel(Level.DEBUG); - } - - private static MiniHBaseCluster cluster; - private static HRegionServer rs; - private static Configuration conf; - private static HBaseTestingUtility TEST_UTIL; - private static MetricsRegionServer metricsRegionServer; - private static MetricsRegionServerSource serverSource; - private static final int NUM_SCAN_NEXT = 30; - private static int numScanNext = 0; - - @BeforeClass - public static void startCluster() throws Exception { - metricsHelper = CompatibilityFactory.getInstance(MetricsAssertHelper.class); - TEST_UTIL = new HBaseTestingUtility(); - conf = TEST_UTIL.getConfiguration(); - conf.getLong("hbase.splitlog.max.resubmit", 0); - // Make the failure test faster - conf.setInt("zookeeper.recovery.retry", 0); - conf.setInt(HConstants.REGIONSERVER_INFO_PORT, -1); - - TEST_UTIL.startMiniCluster(1, 1); - cluster = TEST_UTIL.getHBaseCluster(); - - cluster.waitForActiveAndReadyMaster(); - - while (cluster.getLiveRegionServerThreads().size() < 1) { - Threads.sleep(100); - } - - rs = cluster.getRegionServer(0); - metricsRegionServer = rs.getRegionServerMetrics(); - serverSource = metricsRegionServer.getMetricsSource(); - } - - @AfterClass - public static void after() throws Exception { - if (TEST_UTIL != null) { - TEST_UTIL.shutdownMiniCluster(); - } - } - - @Test(timeout = 300000) - public void testRegionCount() throws Exception { - String regionMetricsKey = "regionCount"; - long regions = metricsHelper.getGaugeLong(regionMetricsKey, serverSource); - // Creating a table should add one region - TEST_UTIL.createTable(TableName.valueOf("table"), Bytes.toBytes("cf")); - 
metricsHelper.assertGaugeGt(regionMetricsKey, regions, serverSource); - } - - @Test - public void testLocalFiles() throws Exception { - metricsHelper.assertGauge("percentFilesLocal", 0, serverSource); - metricsHelper.assertGauge("percentFilesLocalSecondaryRegions", 0, serverSource); - } - - @Test - public void testRequestCount() throws Exception { - String tableNameString = "testRequestCount"; - TableName tName = TableName.valueOf(tableNameString); - byte[] cfName = Bytes.toBytes("d"); - byte[] row = Bytes.toBytes("rk"); - byte[] qualifier = Bytes.toBytes("qual"); - byte[] initValue = Bytes.toBytes("Value"); - - TEST_UTIL.createTable(tName, cfName); - - Connection connection = TEST_UTIL.getConnection(); - connection.getTable(tName).close(); //wait for the table to come up. - - // Do a first put to be sure that the connection is established, meta is there and so on. - Table table = connection.getTable(tName); - Put p = new Put(row); - p.addColumn(cfName, qualifier, initValue); - table.put(p); - - metricsRegionServer.getRegionServerWrapper().forceRecompute(); - long requests = metricsHelper.getCounter("totalRequestCount", serverSource); - long readRequests = metricsHelper.getCounter("readRequestCount", serverSource); - long writeRequests = metricsHelper.getCounter("writeRequestCount", serverSource); - - for (int i=0; i< 30; i++) { - table.put(p); - } - - metricsRegionServer.getRegionServerWrapper().forceRecompute(); - metricsHelper.assertCounter("totalRequestCount", requests + 30, serverSource); - metricsHelper.assertCounter("readRequestCount", readRequests, serverSource); - metricsHelper.assertCounter("writeRequestCount", writeRequests + 30, serverSource); - - Get g = new Get(row); - for (int i=0; i< 10; i++) { - table.get(g); - } - - metricsRegionServer.getRegionServerWrapper().forceRecompute(); - metricsHelper.assertCounter("totalRequestCount", requests + 40, serverSource); - metricsHelper.assertCounter("readRequestCount", readRequests + 10, serverSource); - metricsHelper.assertCounter("writeRequestCount", writeRequests + 30, serverSource); - - try (RegionLocator locator = connection.getRegionLocator(tName)) { - for ( HRegionLocation location: locator.getAllRegionLocations()) { - HRegionInfo i = location.getRegionInfo(); - MetricsRegionAggregateSource agg = rs.getRegion(i.getRegionName()) - .getMetrics() - .getSource() - .getAggregateSource(); - String prefix = "namespace_"+NamespaceDescriptor.DEFAULT_NAMESPACE_NAME_STR+ - "_table_"+tableNameString + - "_region_" + i.getEncodedName()+ - "_metric"; - metricsHelper.assertCounter(prefix + "_getNumOps", 10, agg); - metricsHelper.assertCounter(prefix + "_mutateCount", 31, agg); - } - } - List<Get> gets = new ArrayList<Get>(); - for (int i=0; i< 10; i++) { - gets.add(new Get(row)); - } - table.get(gets); - - metricsRegionServer.getRegionServerWrapper().forceRecompute(); - metricsHelper.assertCounter("totalRequestCount", requests + 50, serverSource); - metricsHelper.assertCounter("readRequestCount", readRequests + 20, serverSource); - metricsHelper.assertCounter("writeRequestCount", writeRequests + 30, serverSource); - - List<Put> puts = new ArrayList<>(); - for (int i=0; i< 30; i++) { - puts.add(p); - } - table.put(puts); - - metricsRegionServer.getRegionServerWrapper().forceRecompute(); - metricsHelper.assertCounter("totalRequestCount", requests + 80, serverSource); - metricsHelper.assertCounter("readRequestCount", readRequests + 20, serverSource); - metricsHelper.assertCounter("writeRequestCount", writeRequests + 60, serverSource); - - table.close(); - } -
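testRequestCount above snapshots each counter, drives a known number of operations, and then asserts baseline plus delta after forceRecompute(), so traffic from earlier tests on the shared mini-cluster cannot break the expectations. The same pattern in a self-contained sketch, with an AtomicLong standing in for the recomputed wrapper value:

    import java.util.concurrent.atomic.AtomicLong;

    public class DeltaAssertSketch {
      static final AtomicLong writeRequestCount = new AtomicLong(899); // pre-existing traffic

      public static void main(String[] args) {
        long baseline = writeRequestCount.get();
        for (int i = 0; i < 30; i++) {
          writeRequestCount.incrementAndGet(); // stand-in for table.put(p)
        }
        long expected = baseline + 30;
        if (writeRequestCount.get() != expected) {
          throw new AssertionError("writeRequestCount: expected " + expected);
        }
        System.out.println("delta assertion passed");
      }
    }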
- @Test - public void testMutationsWithoutWal() throws Exception { - TableName tableName = TableName.valueOf("testMutationsWithoutWal"); - byte[] cf = Bytes.toBytes("d"); - byte[] row = Bytes.toBytes("rk"); - byte[] qualifier = Bytes.toBytes("qual"); - byte[] val = Bytes.toBytes("Value"); - - metricsRegionServer.getRegionServerWrapper().forceRecompute(); - - Table t = TEST_UTIL.createTable(tableName, cf); - - Put p = new Put(row); - p.addColumn(cf, qualifier, val); - p.setDurability(Durability.SKIP_WAL); - - t.put(p); - - metricsRegionServer.getRegionServerWrapper().forceRecompute(); - metricsHelper.assertGauge("mutationsWithoutWALCount", 1, serverSource); - long minLength = row.length + cf.length + qualifier.length + val.length; - metricsHelper.assertGaugeGt("mutationsWithoutWALSize", minLength, serverSource); - - t.close(); - } - - @Test - public void testStoreCount() throws Exception { - TableName tableName = TableName.valueOf("testStoreCount"); - byte[] cf = Bytes.toBytes("d"); - byte[] row = Bytes.toBytes("rk"); - byte[] qualifier = Bytes.toBytes("qual"); - byte[] val = Bytes.toBytes("Value"); - - metricsRegionServer.getRegionServerWrapper().forceRecompute(); - long stores = metricsHelper.getGaugeLong("storeCount", serverSource); - long storeFiles = metricsHelper.getGaugeLong("storeFileCount", serverSource); - - //Force a hfile. - Table t = TEST_UTIL.createTable(tableName, cf); - Put p = new Put(row); - p.addColumn(cf, qualifier, val); - t.put(p); - TEST_UTIL.getHBaseAdmin().flush(tableName); - - metricsRegionServer.getRegionServerWrapper().forceRecompute(); - metricsHelper.assertGauge("storeCount", stores +1, serverSource); - metricsHelper.assertGauge("storeFileCount", storeFiles + 1, serverSource); - - t.close(); - } - - @Test - public void testCheckAndPutCount() throws Exception { - String tableNameString = "testCheckAndPutCount"; - TableName tableName = TableName.valueOf(tableNameString); - byte[] cf = Bytes.toBytes("d"); - byte[] row = Bytes.toBytes("rk"); - byte[] qualifier = Bytes.toBytes("qual"); - byte[] valOne = Bytes.toBytes("Value"); - byte[] valTwo = Bytes.toBytes("ValueTwo"); - byte[] valThree = Bytes.toBytes("ValueThree"); - - Table t = TEST_UTIL.createTable(tableName, cf); - Put p = new Put(row); - p.addColumn(cf, qualifier, valOne); - t.put(p); - - Put pTwo = new Put(row); - pTwo.addColumn(cf, qualifier, valTwo); - t.checkAndPut(row, cf, qualifier, valOne, pTwo); - - Put pThree = new Put(row); - pThree.addColumn(cf, qualifier, valThree); - t.checkAndPut(row, cf, qualifier, valOne, pThree); - - metricsRegionServer.getRegionServerWrapper().forceRecompute(); - metricsHelper.assertCounter("checkMutateFailedCount", 1, serverSource); - metricsHelper.assertCounter("checkMutatePassedCount", 1, serverSource); - - t.close(); - } - - @Test - public void testIncrement() throws Exception { - String tableNameString = "testIncrement"; - TableName tableName = TableName.valueOf(tableNameString); - byte[] cf = Bytes.toBytes("d"); - byte[] row = Bytes.toBytes("rk"); - byte[] qualifier = Bytes.toBytes("qual"); - byte[] val = Bytes.toBytes(0l); - - - Table t = TEST_UTIL.createTable(tableName, cf); - Put p = new Put(row); - p.addColumn(cf, qualifier, val); - t.put(p); - - for(int count = 0; count< 13; count++) { - Increment inc = new Increment(row); - inc.addColumn(cf, qualifier, 100); - t.increment(inc); - } - - metricsRegionServer.getRegionServerWrapper().forceRecompute(); - metricsHelper.assertCounter("incrementNumOps", 13, serverSource); - - t.close(); - } - - @Test - public void 
testAppend() throws Exception { - String tableNameString = "testAppend"; - TableName tableName = TableName.valueOf(tableNameString); - byte[] cf = Bytes.toBytes("d"); - byte[] row = Bytes.toBytes("rk"); - byte[] qualifier = Bytes.toBytes("qual"); - byte[] val = Bytes.toBytes("One"); - - - Table t = TEST_UTIL.createTable(tableName, cf); - Put p = new Put(row); - p.addColumn(cf, qualifier, val); - t.put(p); - - for(int count = 0; count< 73; count++) { - Append append = new Append(row); - append.add(cf, qualifier, Bytes.toBytes(",Test")); - t.append(append); - } - - metricsRegionServer.getRegionServerWrapper().forceRecompute(); - metricsHelper.assertCounter("appendNumOps", 73, serverSource); - - t.close(); - } - - @Test - public void testScanNext() throws IOException { - String tableNameString = "testScanNext"; - TableName tableName = TableName.valueOf(tableNameString); - byte[] cf = Bytes.toBytes("d"); - byte[] qualifier = Bytes.toBytes("qual"); - byte[] val = Bytes.toBytes("One"); - - List<Put> puts = new ArrayList<>(); - for (int insertCount =0; insertCount < 100; insertCount++) { - Put p = new Put(Bytes.toBytes("" + insertCount + "row")); - p.addColumn(cf, qualifier, val); - puts.add(p); - } - try (Table t = TEST_UTIL.createTable(tableName, cf)) { - t.put(puts); - - Scan s = new Scan(); - s.setBatch(1); - s.setCaching(1); - ResultScanner resultScanners = t.getScanner(s); - - for (int nextCount = 0; nextCount < NUM_SCAN_NEXT; nextCount++) { - Result result = resultScanners.next(); - assertNotNull(result); - assertEquals(1, result.size()); - } - } - numScanNext += NUM_SCAN_NEXT; - try (RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName)) { - for ( HRegionLocation location: locator.getAllRegionLocations()) { - HRegionInfo i = location.getRegionInfo(); - MetricsRegionAggregateSource agg = rs.getRegion(i.getRegionName()) - .getMetrics() - .getSource() - .getAggregateSource(); - String prefix = "namespace_"+NamespaceDescriptor.DEFAULT_NAMESPACE_NAME_STR+ - "_table_"+tableNameString + - "_region_" + i.getEncodedName()+ - "_metric"; - metricsHelper.assertCounter(prefix + "_scanNextNumOps", NUM_SCAN_NEXT, agg); - } - metricsHelper.assertCounter("ScanNext_num_ops", numScanNext, serverSource); - } - try (Admin admin = TEST_UTIL.getHBaseAdmin()) { - admin.disableTable(tableName); - admin.deleteTable(tableName); - } - } - - @Test - public void testScanNextForSmallScan() throws IOException { - String tableNameString = "testScanNextSmall"; - TableName tableName = TableName.valueOf(tableNameString); - byte[] cf = Bytes.toBytes("d"); - byte[] qualifier = Bytes.toBytes("qual"); - byte[] val = Bytes.toBytes("One"); - - List<Put> puts = new ArrayList<>(); - for (int insertCount =0; insertCount < 100; insertCount++) { - Put p = new Put(Bytes.toBytes("" + insertCount + "row")); - p.addColumn(cf, qualifier, val); - puts.add(p); - } - try (Table t = TEST_UTIL.createTable(tableName, cf)) { - t.put(puts); - - Scan s = new Scan(); - s.setSmall(true); - s.setCaching(1); - ResultScanner resultScanners = t.getScanner(s); - - for (int nextCount = 0; nextCount < NUM_SCAN_NEXT; nextCount++) { - Result result = resultScanners.next(); - assertNotNull(result); - assertEquals(1, result.size()); - } - } - numScanNext += NUM_SCAN_NEXT; - try (RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName)) { - for ( HRegionLocation location: locator.getAllRegionLocations()) { - HRegionInfo i = location.getRegionInfo(); - MetricsRegionAggregateSource agg = rs.getRegion(i.getRegionName()) -
.getMetrics() - .getSource() - .getAggregateSource(); - String prefix = "namespace_"+NamespaceDescriptor.DEFAULT_NAMESPACE_NAME_STR+ - "_table_"+tableNameString + - "_region_" + i.getEncodedName()+ - "_metric"; - metricsHelper.assertCounter(prefix + "_scanNextNumOps", NUM_SCAN_NEXT, agg); - } - metricsHelper.assertCounter("ScanNext_num_ops", numScanNext, serverSource); - } - try (Admin admin = TEST_UTIL.getHBaseAdmin()) { - admin.disableTable(tableName); - admin.deleteTable(tableName); - } - } - - @Test - public void testMobMetrics() throws IOException, InterruptedException { - String tableNameString = "testMobMetrics"; - TableName tableName = TableName.valueOf(tableNameString); - byte[] cf = Bytes.toBytes("d"); - byte[] qualifier = Bytes.toBytes("qual"); - byte[] val = Bytes.toBytes("mobdata"); - int numHfiles = conf.getInt("hbase.hstore.compactionThreshold", 3) - 1; - HTableDescriptor htd = new HTableDescriptor(tableName); - HColumnDescriptor hcd = new HColumnDescriptor(cf); - hcd.setMobEnabled(true); - hcd.setMobThreshold(0); - htd.addFamily(hcd); - Connection connection = ConnectionFactory.createConnection(conf); - Admin admin = connection.getAdmin(); - HTable t = TEST_UTIL.createTable(htd, new byte[0][0], conf); - Region region = rs.getOnlineRegions(tableName).get(0); - t.setAutoFlush(true, true); - for (int insertCount = 0; insertCount < numHfiles; insertCount++) { - Put p = new Put(Bytes.toBytes(insertCount)); - p.addColumn(cf, qualifier, val); - t.put(p); - admin.flush(tableName); - } - metricsRegionServer.getRegionServerWrapper().forceRecompute(); - metricsHelper.assertCounter("mobFlushCount", numHfiles, serverSource); - Scan scan = new Scan(Bytes.toBytes(0), Bytes.toBytes(2)); - ResultScanner scanner = t.getScanner(scan); - scanner.next(100); - numScanNext++; // this is an ugly construct - scanner.close(); - metricsRegionServer.getRegionServerWrapper().forceRecompute(); - metricsHelper.assertCounter("mobScanCellsCount", 2, serverSource); - region.getTableDesc().getFamily(cf).setMobThreshold(100); - ((HRegion)region).initialize(); - region.compact(true); - metricsRegionServer.getRegionServerWrapper().forceRecompute(); - metricsHelper.assertCounter("cellsCountCompactedFromMob", numHfiles, - serverSource); - metricsHelper.assertCounter("cellsCountCompactedToMob", 0, serverSource); - scanner = t.getScanner(scan); - scanner.next(100); - numScanNext++; // this is an ugly construct - metricsRegionServer.getRegionServerWrapper().forceRecompute(); - // metrics are reset by the region initialization - metricsHelper.assertCounter("mobScanCellsCount", 0, serverSource); - for (int insertCount = numHfiles; - insertCount < 2 * numHfiles - 1; insertCount++) { - Put p = new Put(Bytes.toBytes(insertCount)); - p.addColumn(cf, qualifier, val); - t.put(p); - admin.flush(tableName); - } - region.getTableDesc().getFamily(cf).setMobThreshold(0); - ((HRegion)region).initialize(); - region.compact(true); - metricsRegionServer.getRegionServerWrapper().forceRecompute(); - // metrics are reset by the region initialization - metricsHelper.assertCounter("cellsCountCompactedFromMob", 0, serverSource); - metricsHelper.assertCounter("cellsCountCompactedToMob", 2 * numHfiles - 1, - serverSource); - t.close(); - admin.close(); - connection.close(); - } - - @Test - public void testRangeCountMetrics() throws Exception { - String tableNameString = "testRangeCountMetrics"; - final long[] timeranges = - { 1, 3, 10, 30, 100, 300, 1000, 3000, 10000, 30000, 60000, 120000, 300000, 600000 }; - final String timeRangeType = 
"TimeRangeCount"; - final String timeRangeMetricName = "Mutate"; - boolean timeRangeCountUpdated = false; - - TableName tName = TableName.valueOf(tableNameString); - byte[] cfName = Bytes.toBytes("d"); - byte[] row = Bytes.toBytes("rk"); - byte[] qualifier = Bytes.toBytes("qual"); - byte[] initValue = Bytes.toBytes("Value"); - - TEST_UTIL.createTable(tName, cfName); - - Connection connection = TEST_UTIL.getConnection(); - connection.getTable(tName).close(); // wait for the table to come up. - - // Do a first put to be sure that the connection is established, meta is there and so on. - Table table = connection.getTable(tName); - Put p = new Put(row); - p.addColumn(cfName, qualifier, initValue); - table.put(p); - - // do some puts and gets - for (int i = 0; i < 10; i++) { - table.put(p); - } - - Get g = new Get(row); - for (int i = 0; i < 10; i++) { - table.get(g); - } - - metricsRegionServer.getRegionServerWrapper().forceRecompute(); - - // Check some time range counters were updated - long prior = 0; - - String dynamicMetricName; - for (int i = 0; i < timeranges.length; i++) { - dynamicMetricName = - timeRangeMetricName + "_" + timeRangeType + "_" + prior + "-" + timeranges[i]; - if (metricsHelper.checkCounterExists(dynamicMetricName, serverSource)) { - long count = metricsHelper.getCounter(dynamicMetricName, serverSource); - if (count > 0) { - timeRangeCountUpdated = true; - break; - } - } - prior = timeranges[i]; - } - dynamicMetricName = - timeRangeMetricName + "_" + timeRangeType + "_" + timeranges[timeranges.length - 1] + "-inf"; - if (metricsHelper.checkCounterExists(dynamicMetricName, serverSource)) { - long count = metricsHelper.getCounter(dynamicMetricName, serverSource); - if (count > 0) { - timeRangeCountUpdated = true; - } - } - assertEquals(true, timeRangeCountUpdated); - - table.close(); - } -} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRemoveRegionMetrics.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRemoveRegionMetrics.java deleted file mode 100644 index 9e1a61e..0000000 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRemoveRegionMetrics.java +++ /dev/null @@ -1,137 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.regionserver; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.CompatibilityFactory; -import org.apache.hadoop.hbase.HBaseTestingUtility; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.MiniHBaseCluster; -import org.apache.hadoop.hbase.NamespaceDescriptor; -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.client.Admin; -import org.apache.hadoop.hbase.client.Put; -import org.apache.hadoop.hbase.client.RegionLocator; -import org.apache.hadoop.hbase.client.Table; -import org.apache.hadoop.hbase.test.MetricsAssertHelper; -import org.apache.hadoop.hbase.testclassification.LargeTests; - -import org.apache.hadoop.hbase.testclassification.RegionServerTests; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.Threads; -import org.junit.BeforeClass; -import org.junit.Test; -import org.junit.experimental.categories.Category; - -import java.io.IOException; - -@Category({RegionServerTests.class, LargeTests.class}) -public class TestRemoveRegionMetrics { - - private static MiniHBaseCluster cluster; - private static Configuration conf; - private static HBaseTestingUtility TEST_UTIL; - private static MetricsAssertHelper metricsHelper; - - @BeforeClass - public static void startCluster() throws Exception { - metricsHelper = CompatibilityFactory.getInstance(MetricsAssertHelper.class); - TEST_UTIL = new HBaseTestingUtility(); - conf = TEST_UTIL.getConfiguration(); - conf.getLong("hbase.splitlog.max.resubmit", 0); - // Make the failure test faster - conf.setInt("zookeeper.recovery.retry", 0); - conf.setInt(HConstants.REGIONSERVER_INFO_PORT, -1); - - TEST_UTIL.startMiniCluster(1, 2); - cluster = TEST_UTIL.getHBaseCluster(); - - cluster.waitForActiveAndReadyMaster(); - - while (cluster.getLiveRegionServerThreads().size() < 2) { - Threads.sleep(100); - } - } - - - @Test - public void testMoveRegion() throws IOException, InterruptedException { - String tableNameString = "testMoveRegion"; - TableName tableName = TableName.valueOf(tableNameString); - Table t = TEST_UTIL.createTable(tableName, Bytes.toBytes("D")); - TEST_UTIL.waitUntilAllRegionsAssigned(t.getName()); - Admin admin = TEST_UTIL.getHBaseAdmin(); - HRegionInfo regionInfo; - byte[] row = Bytes.toBytes("r1"); - - - for (int i = 0; i < 30; i++) { - boolean moved = false; - try (RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName)) { - regionInfo = locator.getRegionLocation(row, true).getRegionInfo(); - } - - int currentServerIdx = cluster.getServerWith(regionInfo.getRegionName()); - int destServerIdx = (currentServerIdx +1)% cluster.getLiveRegionServerThreads().size(); - HRegionServer currentServer = cluster.getRegionServer(currentServerIdx); - HRegionServer destServer = cluster.getRegionServer(destServerIdx); - byte[] destServerName = Bytes.toBytes(destServer.getServerName().getServerName()); - - - // Do a put. 
The counters should be non-zero now - Put p = new Put(row); - p.addColumn(Bytes.toBytes("D"), Bytes.toBytes("Zero"), Bytes.toBytes("VALUE")); - t.put(p); - - - MetricsRegionAggregateSource currentAgg = currentServer.getRegion(regionInfo.getRegionName()) - .getMetrics() - .getSource() - .getAggregateSource(); - - String prefix = "namespace_"+ NamespaceDescriptor.DEFAULT_NAMESPACE_NAME_STR+ - "_table_"+tableNameString + - "_region_" + regionInfo.getEncodedName()+ - "_metric"; - - metricsHelper.assertCounter(prefix + "_mutateCount", 1, currentAgg); - - - try { - admin.move(regionInfo.getEncodedNameAsBytes(), destServerName); - moved = true; - Thread.sleep(5000); - } catch (IOException ioe) { - moved = false; - } - TEST_UTIL.waitUntilAllRegionsAssigned(t.getName()); - - if (moved) { - MetricsRegionAggregateSource destAgg = destServer.getRegion(regionInfo.getRegionName()) - .getMetrics() - .getSource() - .getAggregateSource(); - metricsHelper.assertCounter(prefix + "_mutateCount", 0, destAgg); - } - } - - TEST_UTIL.deleteTable(tableName); - - } -} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestMetricsWAL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestMetricsWAL.java deleted file mode 100644 index ba6915b..0000000 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestMetricsWAL.java +++ /dev/null @@ -1,69 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.regionserver.wal; - -import org.apache.hadoop.hbase.testclassification.MiscTests; -import org.apache.hadoop.hbase.testclassification.SmallTests; -import org.junit.Test; -import org.junit.experimental.categories.Category; - -import java.util.concurrent.TimeUnit; - -import static org.junit.Assert.assertEquals; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; - -@Category({MiscTests.class, SmallTests.class}) -public class TestMetricsWAL { - @Test - public void testLogRollRequested() throws Exception { - MetricsWALSource source = mock(MetricsWALSourceImpl.class); - MetricsWAL metricsWAL = new MetricsWAL(source); - metricsWAL.logRollRequested(false); - metricsWAL.logRollRequested(true); - - // Log roll was requested twice - verify(source, times(2)).incrementLogRollRequested(); - // One was because of low replication on the hlog. 
- verify(source, times(1)).incrementLowReplicationLogRoll(); - } - - @Test - public void testPostSync() throws Exception { - long nanos = TimeUnit.MILLISECONDS.toNanos(145); - MetricsWALSource source = mock(MetricsWALSourceImpl.class); - MetricsWAL metricsWAL = new MetricsWAL(source); - metricsWAL.postSync(nanos, 1); - verify(source, times(1)).incrementSyncTime(145); - } - - @Test - public void testSlowAppend() throws Exception { - MetricsWALSource source = new MetricsWALSourceImpl(); - MetricsWAL metricsWAL = new MetricsWAL(source); - // One not so slow append (< 1000) - metricsWAL.postAppend(1, 900); - // Two slow appends (> 1000) - metricsWAL.postAppend(1, 1010); - metricsWAL.postAppend(1, 2000); - assertEquals(2, source.getSlowAppendCount()); - } -} \ No newline at end of file diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/ReplicationSourceDummy.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/ReplicationSourceDummy.java index abe484e..7e9db04 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/ReplicationSourceDummy.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/ReplicationSourceDummy.java @@ -27,7 +27,6 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Stoppable; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.replication.regionserver.MetricsSource; import org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceInterface; import org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager; @@ -43,7 +42,7 @@ public class ReplicationSourceDummy implements ReplicationSourceInterface { @Override public void init(Configuration conf, FileSystem fs, ReplicationSourceManager manager, ReplicationQueues rq, ReplicationPeers rp, Stoppable stopper, String peerClusterId, - UUID clusterId, ReplicationEndpoint replicationEndpoint, MetricsSource metrics) + UUID clusterId, ReplicationEndpoint replicationEndpoint) throws IOException { this.manager = manager; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRegionReplicaReplicationEndpointNoMaster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRegionReplicaReplicationEndpointNoMaster.java index a870ed8..549a979 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRegionReplicaReplicationEndpointNoMaster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRegionReplicaReplicationEndpointNoMaster.java @@ -247,7 +247,6 @@ public class TestRegionReplicaReplicationEndpointNoMaster { ReplicationEndpoint.Context context = mock(ReplicationEndpoint.Context.class); when(context.getConfiguration()).thenReturn(HTU.getConfiguration()); - when(context.getMetrics()).thenReturn(mock(MetricsSource.class)); replicator.init(context); replicator.start(); @@ -278,7 +277,6 @@ public class TestRegionReplicaReplicationEndpointNoMaster { ReplicationEndpoint.Context context = mock(ReplicationEndpoint.Context.class); when(context.getConfiguration()).thenReturn(HTU.getConfiguration()); - when(context.getMetrics()).thenReturn(mock(MetricsSource.class)); ReplicationPeer mockPeer = mock(ReplicationPeer.class); when(mockPeer.getTableCFs()).thenReturn(null); diff --git a/hbase-shell/pom.xml b/hbase-shell/pom.xml index 7c3754e..a59da0f 100644 --- a/hbase-shell/pom.xml +++ b/hbase-shell/pom.xml @@ -221,15 +221,6 @@ 
org.apache.hbase - hbase-hadoop-compat - - - org.apache.hbase - ${compat.module} - ${project.version} - - - org.apache.hbase hbase-testing-util test diff --git a/hbase-spark/pom.xml b/hbase-spark/pom.xml index 2efbca0..39d2750 100644 --- a/hbase-spark/pom.xml +++ b/hbase-spark/pom.xml @@ -340,161 +340,6 @@ org.apache.hbase - hbase-hadoop-compat - ${project.version} - test - test-jar - - - log4j - log4j - - - org.apache.thrift - thrift - - - org.jruby - jruby-complete - - - org.slf4j - slf4j-log4j12 - - - org.mortbay.jetty - jsp-2.1 - - - org.mortbay.jetty - jsp-api-2.1 - - - org.mortbay.jetty - servlet-api-2.5 - - - com.sun.jersey - jersey-core - - - com.sun.jersey - jersey-json - - - com.sun.jersey - jersey-server - - - org.mortbay.jetty - jetty - - - org.mortbay.jetty - jetty-util - - - tomcat - jasper-runtime - - - tomcat - jasper-compiler - - - org.jruby - jruby-complete - - - org.jboss.netty - netty - - - io.netty - netty - - - - - - org.apache.hbase - hbase-hadoop2-compat - ${project.version} - test - test-jar - - - log4j - log4j - - - org.apache.thrift - thrift - - - org.jruby - jruby-complete - - - org.slf4j - slf4j-log4j12 - - - org.mortbay.jetty - jsp-2.1 - - - org.mortbay.jetty - jsp-api-2.1 - - - org.mortbay.jetty - servlet-api-2.5 - - - com.sun.jersey - jersey-core - - - com.sun.jersey - jersey-json - - - com.sun.jersey - jersey-server - - - org.mortbay.jetty - jetty - - - org.mortbay.jetty - jetty-util - - - tomcat - jasper-runtime - - - tomcat - jasper-compiler - - - org.jruby - jruby-complete - - - org.jboss.netty - netty - - - io.netty - netty - - - - - org.apache.hbase hbase-server ${project.version} diff --git a/hbase-testing-util/pom.xml b/hbase-testing-util/pom.xml index 3e07288..fc682a0 100644 --- a/hbase-testing-util/pom.xml +++ b/hbase-testing-util/pom.xml @@ -91,30 +91,6 @@ compile - org.apache.hbase - hbase-hadoop-compat - jar - compile - - - org.apache.hbase - hbase-hadoop-compat - test-jar - compile - - - org.apache.hbase - ${compat.module} - jar - compile - - - org.apache.hbase - ${compat.module} - test-jar - compile - - org.slf4j slf4j-log4j12 ${slf4j.version} diff --git a/hbase-thrift/pom.xml b/hbase-thrift/pom.xml index ff999dc..4ab0dcb 100644 --- a/hbase-thrift/pom.xml +++ b/hbase-thrift/pom.xml @@ -284,15 +284,6 @@ test - org.apache.hbase - hbase-hadoop-compat - - - org.apache.hbase - ${compat.module} - ${project.version} - - org.apache.htrace htrace-core diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/CallQueue.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/CallQueue.java index 59e5856..c0f8276 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/CallQueue.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/CallQueue.java @@ -38,12 +38,9 @@ public class CallQueue implements BlockingQueue { private static final Log LOG = LogFactory.getLog(CallQueue.class); private final BlockingQueue underlyingQueue; - private final ThriftMetrics metrics; - public CallQueue(BlockingQueue underlyingQueue, - ThriftMetrics metrics) { + public CallQueue(BlockingQueue underlyingQueue) { this.underlyingQueue = underlyingQueue; - this.metrics = metrics; } private static long now() { @@ -96,8 +93,6 @@ public class CallQueue implements BlockingQueue { if (result == null) { return; } - metrics.incTimeInQueue(result.timeInQueue()); - metrics.setCallQueueLen(this.size()); } @Override diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/HbaseHandlerMetricsProxy.java 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/HbaseHandlerMetricsProxy.java index 51a0444..731688a 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/HbaseHandlerMetricsProxy.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/HbaseHandlerMetricsProxy.java @@ -41,21 +41,18 @@ public class HbaseHandlerMetricsProxy implements InvocationHandler { HbaseHandlerMetricsProxy.class); private final Hbase.Iface handler; - private final ThriftMetrics metrics; public static Hbase.Iface newInstance(Hbase.Iface handler, - ThriftMetrics metrics, Configuration conf) { return (Hbase.Iface) Proxy.newProxyInstance( handler.getClass().getClassLoader(), new Class[]{Hbase.Iface.class}, - new HbaseHandlerMetricsProxy(handler, metrics, conf)); + new HbaseHandlerMetricsProxy(handler, conf)); } private HbaseHandlerMetricsProxy( - Hbase.Iface handler, ThriftMetrics metrics, Configuration conf) { + Hbase.Iface handler, Configuration conf) { this.handler = handler; - this.metrics = metrics; } @Override @@ -66,7 +63,6 @@ public class HbaseHandlerMetricsProxy implements InvocationHandler { long start = now(); result = m.invoke(handler, args); long processTime = now() - start; - metrics.incMethodTime(m.getName(), processTime); } catch (InvocationTargetException e) { throw e.getTargetException(); } catch (Exception e) { diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/TBoundedThreadPoolServer.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/TBoundedThreadPoolServer.java index 84613bd..f0df19f 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/TBoundedThreadPoolServer.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/TBoundedThreadPoolServer.java @@ -139,14 +139,14 @@ public class TBoundedThreadPoolServer extends TServer { private Args serverOptions; - public TBoundedThreadPoolServer(Args options, ThriftMetrics metrics) { + public TBoundedThreadPoolServer(Args options) { super(options); if (options.maxQueuedRequests > 0) { this.callQueue = new CallQueue( - new LinkedBlockingQueue(options.maxQueuedRequests), metrics); + new LinkedBlockingQueue(options.maxQueuedRequests)); } else { - this.callQueue = new CallQueue(new SynchronousQueue(), metrics); + this.callQueue = new CallQueue(new SynchronousQueue()); } ThreadFactoryBuilder tfb = new ThreadFactoryBuilder(); diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftMetrics.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftMetrics.java deleted file mode 100644 index 883bbdc..0000000 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftMetrics.java +++ /dev/null @@ -1,90 +0,0 @@ -/* - * Copyright The Apache Software Foundation - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the - * License for the specific language governing permissions and limitations - * under the License. - */ - -package org.apache.hadoop.hbase.thrift; - -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.CompatibilitySingletonFactory; - -/** - * This class is for maintaining the various statistics of thrift server - * and publishing them through the metrics interfaces. - */ -@InterfaceAudience.Private -public class ThriftMetrics { - - - public enum ThriftServerType { - ONE, - TWO - } - - public MetricsThriftServerSource getSource() { - return source; - } - - public void setSource(MetricsThriftServerSource source) { - this.source = source; - } - - private MetricsThriftServerSource source; - private final long slowResponseTime; - public static final String SLOW_RESPONSE_NANO_SEC = - "hbase.thrift.slow.response.nano.second"; - public static final long DEFAULT_SLOW_RESPONSE_NANO_SEC = 10 * 1000 * 1000; - - - public ThriftMetrics(Configuration conf, ThriftServerType t) { - slowResponseTime = conf.getLong( SLOW_RESPONSE_NANO_SEC, DEFAULT_SLOW_RESPONSE_NANO_SEC); - - if (t == ThriftServerType.ONE) { - source = CompatibilitySingletonFactory.getInstance(MetricsThriftServerSourceFactory.class).createThriftOneSource(); - } else if (t == ThriftServerType.TWO) { - source = CompatibilitySingletonFactory.getInstance(MetricsThriftServerSourceFactory.class).createThriftTwoSource(); - } - - } - - public void incTimeInQueue(long time) { - source.incTimeInQueue(time); - } - - public void setCallQueueLen(int len) { - source.setCallQueueLen(len); - } - - public void incNumRowKeysInBatchGet(int diff) { - source.incNumRowKeysInBatchGet(diff); - } - - public void incNumRowKeysInBatchMutate(int diff) { - source.incNumRowKeysInBatchMutate(diff); - } - - public void incMethodTime(String name, long time) { - source.incMethodTime(name, time); - // inc general processTime - source.incCall(time); - if (time > slowResponseTime) { - source.incSlowCall(time); - } - } - -} diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServerRunner.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServerRunner.java index 21e382b..b6902ab 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServerRunner.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServerRunner.java @@ -192,7 +192,6 @@ public class ThriftServerRunner implements Runnable { volatile TServer tserver; volatile Server httpServer; private final Hbase.Iface handler; - private final ThriftMetrics metrics; private final HBaseHandler hbaseHandler; private final UserGroupInformation realUser; @@ -314,11 +313,9 @@ public class ThriftServerRunner implements Runnable { } this.conf = HBaseConfiguration.create(conf); this.listenPort = conf.getInt(PORT_CONF_KEY, DEFAULT_LISTEN_PORT); - this.metrics = new ThriftMetrics(conf, ThriftMetrics.ThriftServerType.ONE); this.hbaseHandler = new HBaseHandler(conf, userProvider); - this.hbaseHandler.initMetrics(metrics); this.handler = HbaseHandlerMetricsProxy.newInstance( - hbaseHandler, metrics, conf); + hbaseHandler, conf); this.realUser = userProvider.getCurrent().getUGI(); qop = conf.get(THRIFT_QOP_KEY); doAsEnabled = conf.getBoolean(THRIFT_SUPPORT_PROXYUSER, false); @@ -545,7 +542,7 @@ public class ThriftServerRunner implements Runnable { } else if (implType == ImplType.HS_HA) { THsHaServer.Args serverArgs = new THsHaServer.Args(serverTransport); 
CallQueue callQueue = - new CallQueue(new LinkedBlockingQueue(), metrics); + new CallQueue(new LinkedBlockingQueue()); ExecutorService executorService = createExecutor( callQueue, serverArgs.getMinWorkerThreads(), serverArgs.getMaxWorkerThreads()); serverArgs.executorService(executorService) @@ -557,7 +554,7 @@ public class ThriftServerRunner implements Runnable { TThreadedSelectorServer.Args serverArgs = new HThreadedSelectorServerArgs(serverTransport, conf); CallQueue callQueue = - new CallQueue(new LinkedBlockingQueue(), metrics); + new CallQueue(new LinkedBlockingQueue()); ExecutorService executorService = createExecutor( callQueue, serverArgs.getWorkerThreads(), serverArgs.getWorkerThreads()); serverArgs.executorService(executorService) @@ -588,7 +585,7 @@ public class ThriftServerRunner implements Runnable { + listenAddress + ":" + Integer.toString(listenPort) + " with readTimeout " + readTimeout + "ms; " + serverArgs); TBoundedThreadPoolServer tserver = - new TBoundedThreadPoolServer(serverArgs, metrics); + new TBoundedThreadPoolServer(serverArgs); this.tserver = tserver; } else { throw new AssertionError("Unsupported Thrift server implementation: " + @@ -652,7 +649,6 @@ public class ThriftServerRunner implements Runnable { // nextScannerId and scannerMap are used to manage scanner state protected int nextScannerId = 0; protected HashMap scannerMap = null; - private ThriftMetrics metrics = null; private final ConnectionCache connectionCache; IncrementCoalescer coalescer = null; @@ -1104,9 +1100,6 @@ public class ThriftServerRunner implements Runnable { try { List gets = new ArrayList(rows.size()); table = getTable(tableName); - if (metrics != null) { - metrics.incNumRowKeysInBatchGet(rows.size()); - } for (ByteBuffer row : rows) { Get get = new Get(getBytes(row)); addAttributes(get, attributes); @@ -1260,9 +1253,6 @@ public class ThriftServerRunner implements Runnable { Delete delete = new Delete(getBytes(row)); addAttributes(delete, attributes); - if (metrics != null) { - metrics.incNumRowKeysInBatchMutate(mutations.size()); - } // I apologize for all this mess :) for (Mutation m : mutations) { @@ -1743,10 +1733,6 @@ public class ThriftServerRunner implements Runnable { } } - private void initMetrics(ThriftMetrics metrics) { - this.metrics = metrics; - } - @Override public void increment(TIncrement tincrement) throws IOError, TException { diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftHBaseServiceHandler.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftHBaseServiceHandler.java index 1343149..cc4d9cb 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftHBaseServiceHandler.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftHBaseServiceHandler.java @@ -53,7 +53,6 @@ import org.apache.hadoop.hbase.client.RegionLocator; import org.apache.hadoop.hbase.client.ResultScanner; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.security.UserProvider; -import org.apache.hadoop.hbase.thrift.ThriftMetrics; import org.apache.hadoop.hbase.thrift2.generated.TAppend; import org.apache.hadoop.hbase.thrift2.generated.TDelete; import org.apache.hadoop.hbase.thrift2.generated.TGet; @@ -93,18 +92,16 @@ public class ThriftHBaseServiceHandler implements THBaseService.Iface { static final String MAX_IDLETIME = "hbase.thrift.connection.max-idletime"; public static THBaseService.Iface newInstance( - THBaseService.Iface handler, ThriftMetrics metrics) { + THBaseService.Iface handler) 
{ return (THBaseService.Iface) Proxy.newProxyInstance(handler.getClass().getClassLoader(), - new Class[] { THBaseService.Iface.class }, new THBaseServiceMetricsProxy(handler, metrics)); + new Class[] { THBaseService.Iface.class }, new THBaseServiceMetricsProxy(handler)); } private static final class THBaseServiceMetricsProxy implements InvocationHandler { private final THBaseService.Iface handler; - private final ThriftMetrics metrics; - private THBaseServiceMetricsProxy(THBaseService.Iface handler, ThriftMetrics metrics) { + private THBaseServiceMetricsProxy(THBaseService.Iface handler) { this.handler = handler; - this.metrics = metrics; } @Override @@ -114,7 +111,6 @@ public class ThriftHBaseServiceHandler implements THBaseService.Iface { long start = now(); result = m.invoke(handler, args); int processTime = (int) (now() - start); - metrics.incMethodTime(m.getName(), processTime); } catch (InvocationTargetException e) { throw e.getTargetException(); } catch (Exception e) { diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftServer.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftServer.java index 695c74b..3276322 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftServer.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftServer.java @@ -59,7 +59,6 @@ import org.apache.hadoop.hbase.security.SecurityUtil; import org.apache.hadoop.hbase.security.UserProvider; import org.apache.hadoop.hbase.thrift.CallQueue; import org.apache.hadoop.hbase.thrift.CallQueue.Call; -import org.apache.hadoop.hbase.thrift.ThriftMetrics; import org.apache.hadoop.hbase.thrift2.generated.THBaseService; import org.apache.hadoop.hbase.util.DNS; import org.apache.hadoop.hbase.util.Strings; @@ -252,7 +251,7 @@ public class ThriftServer extends Configured implements Tool { private static TServer getTHsHaServer(TProtocolFactory protocolFactory, TProcessor processor, TTransportFactory transportFactory, int workerThreads, - InetSocketAddress inetSocketAddress, ThriftMetrics metrics) + InetSocketAddress inetSocketAddress) throws TTransportException { TNonblockingServerTransport serverTransport = new TNonblockingServerSocket(inetSocketAddress); log.info("starting HBase HsHA Thrift server on " + inetSocketAddress.toString()); @@ -262,7 +261,7 @@ public class ThriftServer extends Configured implements Tool { serverArgs.minWorkerThreads(workerThreads).maxWorkerThreads(workerThreads); } ExecutorService executorService = createExecutor( - workerThreads, metrics); + workerThreads); serverArgs.executorService(executorService); serverArgs.processor(processor); serverArgs.transportFactory(transportFactory); @@ -271,9 +270,9 @@ public class ThriftServer extends Configured implements Tool { } private static ExecutorService createExecutor( - int workerThreads, ThriftMetrics metrics) { + int workerThreads) { CallQueue callQueue = new CallQueue( - new LinkedBlockingQueue(), metrics); + new LinkedBlockingQueue()); ThreadFactoryBuilder tfb = new ThreadFactoryBuilder(); tfb.setDaemon(true); tfb.setNameFormat("thrift2-worker-%d"); @@ -424,8 +423,6 @@ public class ThriftServer extends Configured implements Tool { boolean nonblocking = cmd.hasOption("nonblocking"); boolean hsha = cmd.hasOption("hsha"); - ThriftMetrics metrics = new ThriftMetrics(conf, ThriftMetrics.ThriftServerType.TWO); - String implType = "threadpool"; if (nonblocking) { implType = "nonblocking"; @@ -444,7 +441,7 @@ public class ThriftServer extends Configured implements Tool { 
final ThriftHBaseServiceHandler hbaseHandler = new ThriftHBaseServiceHandler(conf, userProvider); THBaseService.Iface handler = - ThriftHBaseServiceHandler.newInstance(hbaseHandler, metrics); + ThriftHBaseServiceHandler.newInstance(hbaseHandler); final THBaseService.Processor p = new THBaseService.Processor(handler); conf.setBoolean("hbase.regionserver.thrift.compact", compact); TProcessor processor = p; @@ -508,8 +505,7 @@ public class ThriftServer extends Configured implements Tool { processor, transportFactory, workerThreads, - inetSocketAddress, - metrics); + inetSocketAddress); } else { server = getTThreadPoolServer(protocolFactory, processor, diff --git a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestCallQueue.java b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestCallQueue.java deleted file mode 100644 index b646009..0000000 --- a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestCallQueue.java +++ /dev/null @@ -1,127 +0,0 @@ -/* - * Copyright The Apache Software Foundation - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. - */ -package org.apache.hadoop.hbase.thrift; - -import java.util.ArrayList; -import java.util.Collection; -import java.util.concurrent.LinkedBlockingQueue; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.CompatibilitySingletonFactory; -import org.apache.hadoop.hbase.HBaseTestingUtility; -import org.apache.hadoop.hbase.test.MetricsAssertHelper; -import org.apache.hadoop.hbase.testclassification.ClientTests; -import org.apache.hadoop.hbase.testclassification.SmallTests; -import org.apache.hadoop.hbase.thrift.CallQueue.Call; -import org.junit.experimental.categories.Category; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -import org.junit.runners.Parameterized.Parameters; -import org.junit.Test; - -/** - * Unit testing for CallQueue, a part of the - * org.apache.hadoop.hbase.thrift package. 
- */ -@Category({ClientTests.class, SmallTests.class}) -@RunWith(Parameterized.class) -public class TestCallQueue { - - private static final Log LOG = LogFactory.getLog(TestCallQueue.class); - private static final HBaseTestingUtility UTIL = new HBaseTestingUtility(); - - private static final MetricsAssertHelper metricsHelper = - CompatibilitySingletonFactory.getInstance(MetricsAssertHelper.class); - - private int elementsAdded; - private int elementsRemoved; - - @Parameters - public static Collection getParameters() { - Collection parameters = new ArrayList(); - for (int elementsAdded : new int[] {100, 200, 300}) { - for (int elementsRemoved : new int[] {0, 20, 100}) { - parameters.add(new Object[]{new Integer(elementsAdded), - new Integer(elementsRemoved)}); - } - } - return parameters; - } - - public TestCallQueue(int elementsAdded, int elementsRemoved) { - this.elementsAdded = elementsAdded; - this.elementsRemoved = elementsRemoved; - LOG.debug("elementsAdded:" + elementsAdded + - " elementsRemoved:" + elementsRemoved); - - } - - @Test(timeout = 60000) - public void testPutTake() throws Exception { - ThriftMetrics metrics = createMetrics(); - CallQueue callQueue = new CallQueue( - new LinkedBlockingQueue(), metrics); - for (int i = 0; i < elementsAdded; ++i) { - callQueue.put(createDummyRunnable()); - } - for (int i = 0; i < elementsRemoved; ++i) { - callQueue.take(); - } - verifyMetrics(metrics, "timeInQueue_num_ops", elementsRemoved); - } - - @Test(timeout = 60000) - public void testOfferPoll() throws Exception { - ThriftMetrics metrics = createMetrics(); - CallQueue callQueue = new CallQueue( - new LinkedBlockingQueue(), metrics); - for (int i = 0; i < elementsAdded; ++i) { - callQueue.offer(createDummyRunnable()); - } - for (int i = 0; i < elementsRemoved; ++i) { - callQueue.poll(); - } - verifyMetrics(metrics, "timeInQueue_num_ops", elementsRemoved); - } - - private static ThriftMetrics createMetrics() throws Exception { - Configuration conf = UTIL.getConfiguration(); - ThriftMetrics m = new ThriftMetrics(conf, ThriftMetrics.ThriftServerType.ONE); - m.getSource().init(); - return m; - } - - - private static void verifyMetrics(ThriftMetrics metrics, String name, int expectValue) - throws Exception { - metricsHelper.assertCounter(name, expectValue, metrics.getSource()); - } - - private static Runnable createDummyRunnable() { - return new Runnable() { - @Override - public void run() { - } - }; - } - -} - diff --git a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftServer.java b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftServer.java index d5a020e..efeb47f 100644 --- a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftServer.java +++ b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftServer.java @@ -32,14 +32,12 @@ import java.util.Map; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.CompatibilityFactory; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.filter.ParseFilter; import org.apache.hadoop.hbase.security.UserProvider; -import org.apache.hadoop.hbase.test.MetricsAssertHelper; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.LargeTests; import 
org.apache.hadoop.hbase.thrift.ThriftServerRunner.HBaseHandler; @@ -69,8 +67,6 @@ import org.junit.experimental.categories.Category; public class TestThriftServer { private static final HBaseTestingUtility UTIL = new HBaseTestingUtility(); private static final Log LOG = LogFactory.getLog(TestThriftServer.class); - private static final MetricsAssertHelper metricsHelper = CompatibilityFactory - .getInstance(MetricsAssertHelper.class); protected static final int MAXVERSIONS = 3; private static ByteBuffer asByteBuffer(String i) { @@ -118,7 +114,6 @@ public class TestThriftServer { public void testAll() throws Exception { // Run all tests doTestTableCreateDrop(); - doTestThriftMetrics(); doTestTableMutations(); doTestTableTimestampsAndColumns(); doTestTableScanners(); @@ -164,68 +159,6 @@ public class TestThriftServer { } } - /** - * TODO: These counts are supposed to be zero but sometimes they are not, they are equal to the - * passed in maybe. Investigate why. My guess is they are set by the test that runs just - * previous to this one. Sometimes they are cleared. Sometimes not. - * @param name - * @param maybe - * @param metrics - * @return - */ - private int getCurrentCount(final String name, final int maybe, final ThriftMetrics metrics) { - int currentCount = 0; - try { - metricsHelper.assertCounter(name, maybe, metrics.getSource()); - LOG.info("Shouldn't this be null? name=" + name + ", equals=" + maybe); - currentCount = maybe; - } catch (AssertionError e) { - // Ignore - } - return currentCount; - } - - /** - * Tests if the metrics for thrift handler work correctly - */ - public void doTestThriftMetrics() throws Exception { - LOG.info("START doTestThriftMetrics"); - Configuration conf = UTIL.getConfiguration(); - ThriftMetrics metrics = getMetrics(conf); - Hbase.Iface handler = getHandlerForMetricsTest(metrics, conf); - int currentCountCreateTable = getCurrentCount("createTable_num_ops", 2, metrics); - int currentCountDeleteTable = getCurrentCount("deleteTable_num_ops", 2, metrics); - int currentCountDisableTable = getCurrentCount("disableTable_num_ops", 2, metrics); - createTestTables(handler); - dropTestTables(handler);; - metricsHelper.assertCounter("createTable_num_ops", currentCountCreateTable + 2, - metrics.getSource()); - metricsHelper.assertCounter("deleteTable_num_ops", currentCountDeleteTable + 2, - metrics.getSource()); - metricsHelper.assertCounter("disableTable_num_ops", currentCountDisableTable + 2, - metrics.getSource()); - handler.getTableNames(); // This will have an artificial delay. - - // 3 to 6 seconds (to account for potential slowness), measured in nanoseconds - try { - metricsHelper.assertGaugeGt("getTableNames_avg_time", 3L * 1000 * 1000 * 1000, metrics.getSource()); - metricsHelper.assertGaugeLt("getTableNames_avg_time",6L * 1000 * 1000 * 1000, metrics.getSource()); - } catch (AssertionError e) { - LOG.info("Fix me! Why does this happen? 
A concurrent cluster running?", e); - } - } - - private static Hbase.Iface getHandlerForMetricsTest(ThriftMetrics metrics, Configuration conf) - throws Exception { - Hbase.Iface handler = new MySlowHBaseHandler(conf); - return HbaseHandlerMetricsProxy.newInstance(handler, metrics, conf); - } - - private static ThriftMetrics getMetrics(Configuration conf) throws Exception { - return new ThriftMetrics( conf, ThriftMetrics.ThriftServerType.ONE); - } - - public static void createTestTables(Hbase.Iface handler) throws Exception { // Create/enable/disable/delete tables, ensure methods act correctly assertEquals(handler.getTableNames().size(), 0); diff --git a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftHBaseServiceHandler.java b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftHBaseServiceHandler.java index 654324d..411e264 100644 --- a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftHBaseServiceHandler.java +++ b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftHBaseServiceHandler.java @@ -21,7 +21,6 @@ package org.apache.hadoop.hbase.thrift2; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.CompatibilityFactory; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HTableDescriptor; @@ -35,10 +34,8 @@ import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Durability; import org.apache.hadoop.hbase.filter.ParseFilter; import org.apache.hadoop.hbase.security.UserProvider; -import org.apache.hadoop.hbase.test.MetricsAssertHelper; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.MediumTests; -import org.apache.hadoop.hbase.thrift.ThriftMetrics; import org.apache.hadoop.hbase.thrift2.generated.TAppend; import org.apache.hadoop.hbase.thrift2.generated.TColumn; import org.apache.hadoop.hbase.thrift2.generated.TColumnIncrement; @@ -46,7 +43,6 @@ import org.apache.hadoop.hbase.thrift2.generated.TColumnValue; import org.apache.hadoop.hbase.thrift2.generated.TDelete; import org.apache.hadoop.hbase.thrift2.generated.TDeleteType; import org.apache.hadoop.hbase.thrift2.generated.TGet; -import org.apache.hadoop.hbase.thrift2.generated.THBaseService; import org.apache.hadoop.hbase.thrift2.generated.TIOError; import org.apache.hadoop.hbase.thrift2.generated.TIllegalArgument; import org.apache.hadoop.hbase.thrift2.generated.TIncrement; @@ -104,11 +100,6 @@ public class TestThriftHBaseServiceHandler { new HColumnDescriptor(familyBname).setMaxVersions(2) }; - - private static final MetricsAssertHelper metricsHelper = - CompatibilityFactory.getInstance(MetricsAssertHelper.class); - - public void assertTColumnValuesEqual(List columnValuesA, List columnValuesB) { assertEquals(columnValuesA.size(), columnValuesB.size()); @@ -887,38 +878,6 @@ public class TestThriftHBaseServiceHandler { } @Test - public void testMetrics() throws Exception { - Configuration conf = UTIL.getConfiguration(); - ThriftMetrics metrics = getMetrics(conf); - ThriftHBaseServiceHandler hbaseHandler = createHandler(); - THBaseService.Iface handler = - ThriftHBaseServiceHandler.newInstance(hbaseHandler, metrics); - byte[] rowName = "testMetrics".getBytes(); - ByteBuffer table = wrap(tableAname); - - TGet get = new TGet(wrap(rowName)); - assertFalse(handler.exists(table, get)); - - 
List columnValues = new ArrayList(); - columnValues.add(new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(valueAname))); - columnValues.add(new TColumnValue(wrap(familyBname), wrap(qualifierBname), wrap(valueBname))); - TPut put = new TPut(wrap(rowName), columnValues); - put.setColumnValues(columnValues); - - handler.put(table, put); - - assertTrue(handler.exists(table, get)); - metricsHelper.assertCounter("put_num_ops", 1, metrics.getSource()); - metricsHelper.assertCounter( "exists_num_ops", 2, metrics.getSource()); - } - - private static ThriftMetrics getMetrics(Configuration conf) throws Exception { - ThriftMetrics m = new ThriftMetrics(conf, ThriftMetrics.ThriftServerType.TWO); - m.getSource().init(); //Clear all the metrics - return m; - } - - @Test public void testAttribute() throws Exception { byte[] rowName = "testAttribute".getBytes(); byte[] attributeKey = "attribute1".getBytes(); diff --git a/pom.xml b/pom.xml index d865b0c..d16716a 100644 --- a/pom.xml +++ b/pom.xml @@ -56,7 +56,6 @@ hbase-shell hbase-protocol hbase-client - hbase-hadoop-compat hbase-common hbase-procedure hbase-it @@ -1322,30 +1321,6 @@ test-jar - org.apache.hbase - hbase-hadoop-compat - ${project.version} - - - org.apache.hbase - hbase-hadoop-compat - ${project.version} - test-jar - test - - - org.apache.hbase - ${compat.module} - ${project.version} - - - org.apache.hbase - ${compat.module} - ${project.version} - test-jar - test - - hbase-server org.apache.hbase ${project.version} @@ -1952,12 +1927,8 @@ !hadoop.profile - - hbase-hadoop2-compat - ${hadoop-two.version} - hbase-hadoop2-compat src/main/assembly/hadoop-two-compat.xml @@ -2124,14 +2095,8 @@ 3.0 - - - hbase-hadoop2-compat - ${hadoop-three.version} - - hbase-hadoop2-compat src/main/assembly/hadoop-three-compat.xml -- 2.6.3
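For callers of the thrift1 classes touched above, the post-patch wiring reduces to plain queue and proxy construction. Below is a minimal sketch, not part of the patch itself: it compiles against the '+' side of the CallQueue and TBoundedThreadPoolServer hunks, and the class name CallQueueWiringSketch is invented for illustration.

  package org.apache.hadoop.hbase.thrift;

  import java.util.concurrent.LinkedBlockingQueue;
  import java.util.concurrent.SynchronousQueue;

  import org.apache.hadoop.hbase.thrift.CallQueue.Call;

  // Illustrative only; shows the two CallQueue constructions that survive
  // this change, with the ThriftMetrics argument gone from both call sites.
  public class CallQueueWiringSketch {
    public static void main(String[] args) throws InterruptedException {
      // Unbounded variant, as used for the HsHA and threaded-selector servers.
      CallQueue unbounded = new CallQueue(new LinkedBlockingQueue<Call>());

      // Rendezvous variant, as used by TBoundedThreadPoolServer when
      // maxQueuedRequests <= 0.
      CallQueue rendezvous = new CallQueue(new SynchronousQueue<Call>());

      // The queue still behaves as a BlockingQueue of Runnables.
      unbounded.put(new Runnable() {
        @Override
        public void run() {
          // no-op payload, mirroring TestCallQueue's dummy runnable
        }
      });
      unbounded.take().run();
    }
  }

The only behavioral change at these call sites is the loss of the incTimeInQueue/setCallQueueLen bookkeeping shown in the removed lines of the CallQueue hunk; queue semantics are untouched.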
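The thrift2 side follows the same pattern: ThriftHBaseServiceHandler.newInstance now takes only the wrapped handler. A sketch under the same caveats (illustrative class name, nothing here is part of the patch; it lives in the thrift2 package so the package-level handler constructor shown in the ThriftServer hunk is reachable):

  package org.apache.hadoop.hbase.thrift2;

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.security.UserProvider;
  import org.apache.hadoop.hbase.thrift2.generated.THBaseService;

  // Illustrative only; mirrors the '+' lines of the thrift2 ThriftServer hunks.
  public class Thrift2HandlerSketch {
    public static void main(String[] args) throws Exception {
      Configuration conf = HBaseConfiguration.create();

      // Handler construction is unchanged by the patch.
      ThriftHBaseServiceHandler hbaseHandler =
          new ThriftHBaseServiceHandler(conf, UserProvider.instantiate(conf));

      // newInstance no longer threads a ThriftMetrics through the proxy.
      THBaseService.Iface handler =
          ThriftHBaseServiceHandler.newInstance(hbaseHandler);

      THBaseService.Processor<THBaseService.Iface> processor =
          new THBaseService.Processor<THBaseService.Iface>(handler);
    }
  }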