diff --git dev-support/findbugs-exclude.xml dev-support/findbugs-exclude.xml
index 5ab62e4..33c3c3c 100644
--- dev-support/findbugs-exclude.xml
+++ dev-support/findbugs-exclude.xml
@@ -50,7 +50,7 @@
-
+
diff --git hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/CompatibilitySingletonFactory.java hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/CompatibilitySingletonFactory.java
index 97e887f..2b2c53d 100644
--- hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/CompatibilitySingletonFactory.java
+++ hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/CompatibilitySingletonFactory.java
@@ -44,6 +44,7 @@ public class CompatibilitySingletonFactory extends CompatibilityFactory {
*
* @return the singleton
*/
+ @SuppressWarnings("unchecked")
  public static synchronized <T> T getInstance(Class<T> klass) {
T instance = (T) instances.get(klass);
if (instance == null) {
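Aside for reviewers (not part of the patch): a minimal sketch of how callers are expected to resolve compat singletons through this entry point, assuming the restored generic signature above; the example class name is hypothetical.

import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
import org.apache.hadoop.hbase.master.MetricsMasterSource;
import org.apache.hadoop.hbase.master.MetricsMasterSourceFactory;
import org.apache.hadoop.hbase.master.MetricsMasterWrapper;

public class CompatLookupExample {
  // getInstance() keys its instances map by Class and only ever stores the
  // object loaded for that exact key, so the unchecked cast it performs is
  // safe by construction; that is what @SuppressWarnings("unchecked") records.
  public static MetricsMasterSource lookup(MetricsMasterWrapper wrapper) {
    MetricsMasterSourceFactory factory =
        CompatibilitySingletonFactory.getInstance(MetricsMasterSourceFactory.class);
    return factory.create(wrapper);
  }
}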
diff --git hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSource.java hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSource.java
new file mode 100644
index 0000000..bf31a6b
--- /dev/null
+++ hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSource.java
@@ -0,0 +1,105 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.master;
+
+import org.apache.hadoop.hbase.metrics.BaseSource;
+
+/**
+ * Interface implemented by classes that expose metrics about the master.
+ */
+public interface MetricsMasterSource extends BaseSource {
+
+ /**
+ * The name of the metrics
+ */
+ static final String METRICS_NAME = "Server";
+
+ /**
+ * The context metrics will be under.
+ */
+ static final String METRICS_CONTEXT = "master";
+
+ /**
+ * The name of the metrics context that metrics will be under in jmx
+ */
+ static final String METRICS_JMX_CONTEXT = "Master,sub="+METRICS_NAME;
+
+ /**
+ * Description
+ */
+ static final String METRICS_DESCRIPTION = "Metrics about HBase master server";
+
+ // Strings used for exporting to metrics system.
+ static final String MASTER_ACTIVE_TIME_NAME = "masterActiveTime";
+ static final String MASTER_START_TIME_NAME = "masterStartTime";
+ static final String AVERAGE_LOAD_NAME = "averageLoad";
+ static final String NUM_REGION_SERVERS_NAME = "numRegionServers";
+ static final String NUM_DEAD_REGION_SERVERS_NAME = "numDeadRegionServers";
+ static final String ZOOKEEPER_QUORUM_NAME = "zookeeperQuorum";
+ static final String SERVER_NAME_NAME = "serverName";
+ static final String CLUSTER_ID_NAME = "clusterId";
+ static final String IS_ACTIVE_MASTER_NAME = "isActiveMaster";
+ static final String SPLIT_TIME_NAME = "hlogSplitTime";
+ static final String SPLIT_SIZE_NAME = "hlogSplitSize";
+ static final String CLUSTER_REQUESTS_NAME = "clusterRequests";
+ static final String RIT_COUNT_NAME = "ritCount";
+ static final String RIT_COUNT_OVER_THRESHOLD_NAME = "ritCountOverThreshold";
+ static final String RIT_OLDEST_AGE_NAME = "ritOldestAge";
+ static final String MASTER_ACTIVE_TIME_DESC = "Master Active Time";
+ static final String MASTER_START_TIME_DESC = "Master Start Time";
+ static final String AVERAGE_LOAD_DESC = "AverageLoad";
+ static final String NUMBER_OF_REGION_SERVERS_DESC = "Number of RegionServers";
+ static final String NUMBER_OF_DEAD_REGION_SERVERS_DESC = "Number of dead RegionServers";
+ static final String ZOOKEEPER_QUORUM_DESC = "Zookeeper Quorum";
+ static final String SERVER_NAME_DESC = "Server Name";
+ static final String CLUSTER_ID_DESC = "Cluster Id";
+ static final String IS_ACTIVE_MASTER_DESC = "Is Active Master";
+ static final String SPLIT_TIME_DESC = "Time it takes to finish HLog.splitLog()";
+ static final String SPLIT_SIZE_DESC = "Size of HLog files being split";
+
+
+ /**
+ * Increment the number of requests the cluster has seen.
+   * @param inc Amount to increment the total by.
+ */
+ void incRequests(final int inc);
+
+ /**
+ * Set the number of regions in transition.
+ * @param ritCount count of the regions in transition.
+ */
+ void setRIT(int ritCount);
+
+ /**
+ * Set the count of the number of regions that have been in transition over the threshold time.
+ * @param ritCountOverThreshold number of regions in transition for longer than threshold.
+ */
+ void setRITCountOverThreshold(int ritCountOverThreshold);
+
+ /**
+ * Set the oldest region in transition.
+ * @param age age of the oldest RIT.
+ */
+ void setRITOldestAge(long age);
+
+ void updateSplitTime(long time);
+
+ void updateSplitSize(long size);
+
+}
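Aside (illustration only, not in the patch): how a caller on the master side might drive the methods above once a source exists; the values are invented.

public class MetricsMasterSourceUsage {
  static void report(org.apache.hadoop.hbase.master.MetricsMasterSource source) {
    source.incRequests(100);                   // CLUSTER_REQUESTS_NAME counter
    source.setRIT(3);                          // RIT_COUNT_NAME gauge
    source.setRITCountOverThreshold(1);        // RIT_COUNT_OVER_THRESHOLD_NAME gauge
    source.setRITOldestAge(60000L);            // RIT_OLDEST_AGE_NAME gauge, in ms
    source.updateSplitTime(250L);              // SPLIT_TIME_NAME histogram, in ms
    source.updateSplitSize(64L * 1024 * 1024); // SPLIT_SIZE_NAME histogram, in bytes
  }
}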
diff --git hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceFactory.java hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceFactory.java
new file mode 100644
index 0000000..9e218bb
--- /dev/null
+++ hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceFactory.java
@@ -0,0 +1,28 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.master;
+
+/**
+ * Interface of a factory to create MetricsMasterSource when given a MetricsMasterWrapper
+ */
+public interface MetricsMasterSourceFactory {
+
+ MetricsMasterSource create(MetricsMasterWrapper masterWrapper);
+
+}
diff --git hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterWrapper.java hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterWrapper.java
new file mode 100644
index 0000000..f8c2900
--- /dev/null
+++ hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterWrapper.java
@@ -0,0 +1,85 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.master;
+
+/**
+ * This is the interface that will expose information to hadoop1/hadoop2 implementations of the
+ * MetricsMasterSource.
+ */
+public interface MetricsMasterWrapper {
+
+ /**
+ * Get ServerName
+ */
+ String getServerName();
+
+ /**
+ * Get Average Load
+ * @return Average Load
+ */
+ double getAverageLoad();
+
+ /**
+ * Get the Cluster ID
+ * @return Cluster ID
+ */
+ String getClusterId();
+
+ /**
+ * Get the Zookeeper Quorum Info
+ * @return Zookeeper Quorum Info
+ */
+ String getZookeeperQuorum();
+
+ /**
+ * Get the co-processors
+ * @return Co-processors
+ */
+ String[] getCoprocessors();
+
+ /**
+ * Get hbase master start time
+ * @return Start time of master in milliseconds
+ */
+ long getMasterStartTime();
+
+ /**
+ * Get the hbase master active time
+ * @return Time in milliseconds when master became active
+ */
+ long getMasterActiveTime();
+
+ /**
+ * Whether this master is the active master
+ * @return True if this is the active master
+ */
+ boolean getIsActiveMaster();
+
+ /**
+   * Get the number of live region servers.
+   * @return Number of live region servers.
+ */
+ int getRegionServers();
+
+ /**
+   * Get the number of dead region servers.
+   * @return Number of dead region servers.
+ */
+ int getDeadRegionServers();
+}
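Aside (hypothetical test double, not in the patch): the smallest useful implementation of the wrapper; the real one would delegate each getter to HMaster.

public class StubMetricsMasterWrapper
    implements org.apache.hadoop.hbase.master.MetricsMasterWrapper {
  @Override public String getServerName() { return "master.example.org,60000,1"; }
  @Override public double getAverageLoad() { return 2.5; }
  @Override public String getClusterId() { return "test-cluster"; }
  @Override public String getZookeeperQuorum() { return "zk1.example.org:2181"; }
  @Override public String[] getCoprocessors() { return new String[0]; }
  @Override public long getMasterStartTime() { return 1350000000000L; }
  @Override public long getMasterActiveTime() { return 1350000001000L; }
  @Override public boolean getIsActiveMaster() { return true; }
  @Override public int getRegionServers() { return 3; }
  @Override public int getDeadRegionServers() { return 0; }
}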
diff --git hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetricsSource.java hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetricsSource.java
deleted file mode 100644
index 8fcfaf0..0000000
--- hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetricsSource.java
+++ /dev/null
@@ -1,105 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.master.metrics;
-
-import org.apache.hadoop.hbase.metrics.BaseMetricsSource;
-
-/**
- * Interface that classes that expose metrics about the master will implement.
- */
-public interface MasterMetricsSource extends BaseMetricsSource {
-
- /**
- * The name of the metrics
- */
- static final String METRICS_NAME = "HMaster";
-
- /**
- * The context metrics will be under.
- */
- static final String METRICS_CONTEXT = "hmaster";
-
- /**
- * The name of the metrics context that metrics will be under in jmx
- */
- static final String METRICS_JMX_CONTEXT = "HMaster";
-
- /**
- * Description
- */
- static final String METRICS_DESCRIPTION = "Metrics about HBase master server";
-
- // Strings used for exporting to metrics system.
- static final String MASTER_ACTIVE_TIME_NAME = "masterActiveTime";
- static final String MASTER_START_TIME_NAME = "masterStartTime";
- static final String AVERAGE_LOAD_NAME = "averageLoad";
- static final String NUM_REGION_SERVERS_NAME = "numRegionServers";
- static final String NUM_DEAD_REGION_SERVERS_NAME = "numDeadRegionServers";
- static final String ZOOKEEPER_QUORUM_NAME = "zookeeperQuorum";
- static final String SERVER_NAME_NAME = "serverName";
- static final String CLUSTER_ID_NAME = "clusterId";
- static final String IS_ACTIVE_MASTER_NAME = "isActiveMaster";
- static final String SPLIT_TIME_NAME = "hlogSplitTime";
- static final String SPLIT_SIZE_NAME = "hlogSplitSize";
- static final String CLUSTER_REQUESTS_NAME = "clusterRequests";
- static final String RIT_COUNT_NAME = "ritCount";
- static final String RIT_COUNT_OVER_THRESHOLD_NAME = "ritCountOverThreshold";
- static final String RIT_OLDEST_AGE_NAME = "ritOldestAge";
- static final String MASTER_ACTIVE_TIME_DESC = "Master Active Time";
- static final String MASTER_START_TIME_DESC = "Master Start Time";
- static final String AVERAGE_LOAD_DESC = "AverageLoad";
- static final String NUMBER_OF_REGION_SERVERS_DESC = "Number of RegionServers";
- static final String NUMBER_OF_DEAD_REGION_SERVERS_DESC = "Number of dead RegionServers";
- static final String ZOOKEEPER_QUORUM_DESC = "Zookeeper Quorum";
- static final String SERVER_NAME_DESC = "Server Name";
- static final String CLUSTER_ID_DESC = "Cluster Id";
- static final String IS_ACTIVE_MASTER_DESC = "Is Active Master";
- static final String SPLIT_TIME_DESC = "Time it takes to finish HLog.splitLog()";
- static final String SPLIT_SIZE_DESC = "Size of HLog files being split";
-
-
- /**
- * Increment the number of requests the cluster has seen.
- * @param inc Ammount to increment the total by.
- */
- void incRequests(final int inc);
-
- /**
- * Set the number of regions in transition.
- * @param ritCount count of the regions in transition.
- */
- void setRIT(int ritCount);
-
- /**
- * Set the count of the number of regions that have been in transition over the threshold time.
- * @param ritCountOverThreshold number of regions in transition for longer than threshold.
- */
- void setRITCountOverThreshold(int ritCountOverThreshold);
-
- /**
- * Set the oldest region in transition.
- * @param age age of the oldest RIT.
- */
- void setRITOldestAge(long age);
-
- void updateSplitTime(long time);
-
- void updateSplitSize(long size);
-
-}
diff --git hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetricsSourceFactory.java hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetricsSourceFactory.java
deleted file mode 100644
index 157b2de..0000000
--- hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetricsSourceFactory.java
+++ /dev/null
@@ -1,28 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.master.metrics;
-
-/**
- * Interface of a factory to create MasterMetricsSource when given a MasterMetricsWrapper
- */
-public interface MasterMetricsSourceFactory {
-
- MasterMetricsSource create(MasterMetricsWrapper beanWrapper);
-
-}
diff --git hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetricsWrapper.java hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetricsWrapper.java
deleted file mode 100644
index ff416eb..0000000
--- hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetricsWrapper.java
+++ /dev/null
@@ -1,85 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.master.metrics;
-
-/**
- * This is the interface that will expose information to hadoop1/hadoop2 implementations of the
- * MasterMetricsSource.
- */
-public interface MasterMetricsWrapper {
-
- /**
- * Get ServerName
- */
- String getServerName();
-
- /**
- * Get Average Load
- * @return Average Load
- */
- double getAverageLoad();
-
- /**
- * Get the Cluster ID
- * @return Cluster ID
- */
- String getClusterId();
-
- /**
- * Get the Zookeeper Quorum Info
- * @return Zookeeper Quorum Info
- */
- String getZookeeperQuorum();
-
- /**
- * Get the co-processors
- * @return Co-processors
- */
- String[] getCoprocessors();
-
- /**
- * Get hbase master start time
- * @return Start time of master in milliseconds
- */
- long getMasterStartTime();
-
- /**
- * Get the hbase master active time
- * @return Time in milliseconds when master became active
- */
- long getMasterActiveTime();
-
- /**
- * Whether this master is the active master
- * @return True if this is the active master
- */
- boolean getIsActiveMaster();
-
- /**
- * Get the live region servers
- * @return Live region servers
- */
- int getRegionServers();
-
- /**
- * Get the dead region servers
- * @return Dead region Servers
- */
- int getDeadRegionServers();
-}
diff --git hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseMetricsSource.java hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseMetricsSource.java
deleted file mode 100644
index e8cefef..0000000
--- hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseMetricsSource.java
+++ /dev/null
@@ -1,95 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.metrics;
-
-/**
- * BaseMetricsSource for dynamic metrics to announce to Metrics2
- */
-public interface BaseMetricsSource {
-
- /**
- * Clear out the metrics and re-prepare the source.
- */
- void init();
-
- /**
- * Set a gauge to a specific value.
- *
- * @param gaugeName the name of the gauge
- * @param value the value
- */
- void setGauge(String gaugeName, long value);
-
- /**
- * Add some amount to a gauge.
- *
- * @param gaugeName the name of the gauge
- * @param delta the amount to change the gauge by.
- */
- void incGauge(String gaugeName, long delta);
-
- /**
- * Subtract some amount from a gauge.
- *
- * @param gaugeName the name of the gauge
- * @param delta the amount to change the gauge by.
- */
- void decGauge(String gaugeName, long delta);
-
- /**
- * Remove a gauge and no longer announce it.
- *
- * @param key Name of the gauge to remove.
- */
- void removeGauge(String key);
-
- /**
- * Add some amount to a counter.
- *
- * @param counterName the name of the counter
- * @param delta the amount to change the counter by.
- */
- void incCounters(String counterName, long delta);
-
- /**
- * Add some value to a histogram.
- *
- * @param name the name of the histogram
- * @param value the value to add to the histogram
- */
- void updateHistogram(String name, long value);
-
-
- /**
- * Add some value to a Quantile (An accurate histogram).
- *
- * @param name the name of the quantile
- * @param value the value to add to the quantile
- */
- void updateQuantile(String name, long value);
-
-
- /**
- * Remove a counter and stop announcing it to metrics2.
- *
- * @param key
- */
- void removeCounter(String key);
-
-}
diff --git hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseSource.java hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseSource.java
new file mode 100644
index 0000000..20139c4
--- /dev/null
+++ hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseSource.java
@@ -0,0 +1,89 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.metrics;
+
+/**
+ * BaseSource for dynamic metrics to announce to Metrics2
+ */
+public interface BaseSource {
+
+ public static final String HBASE_METRICS_SYSTEM_NAME = "HBase";
+
+ /**
+ * Clear out the metrics and re-prepare the source.
+ */
+ void init();
+
+ /**
+ * Set a gauge to a specific value.
+ *
+ * @param gaugeName the name of the gauge
+ * @param value the value
+ */
+ void setGauge(String gaugeName, long value);
+
+ /**
+ * Add some amount to a gauge.
+ *
+ * @param gaugeName the name of the gauge
+ * @param delta the amount to change the gauge by.
+ */
+ void incGauge(String gaugeName, long delta);
+
+ /**
+ * Subtract some amount from a gauge.
+ *
+ * @param gaugeName the name of the gauge
+ * @param delta the amount to change the gauge by.
+ */
+ void decGauge(String gaugeName, long delta);
+
+ /**
+ * Remove a metric and no longer announce it.
+ *
+ * @param key Name of the gauge to remove.
+ */
+ void removeMetric(String key);
+
+ /**
+ * Add some amount to a counter.
+ *
+ * @param counterName the name of the counter
+ * @param delta the amount to change the counter by.
+ */
+ void incCounters(String counterName, long delta);
+
+ /**
+ * Add some value to a histogram.
+ *
+ * @param name the name of the histogram
+ * @param value the value to add to the histogram
+ */
+ void updateHistogram(String name, long value);
+
+
+ /**
+ * Add some value to a Quantile (An accurate histogram).
+ *
+ * @param name the name of the quantile
+ * @param value the value to add to the quantile
+ */
+ void updateQuantile(String name, long value);
+
+}
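Aside (illustration only): the intended semantics of the BaseSource contract; gauges hold a value that can move both ways, counters only grow, and histograms/quantiles accumulate observations. Metric names here are invented.

public class BaseSourceUsage {
  static void publish(org.apache.hadoop.hbase.metrics.BaseSource source) {
    source.init();                                 // clear and re-prepare the source
    source.setGauge("exampleSize", 1024L);         // absolute value
    source.incGauge("exampleSize", 512L);          // now 1536
    source.decGauge("exampleSize", 256L);          // now 1280
    source.incCounters("exampleRequests", 1L);     // monotonically increasing
    source.updateHistogram("exampleLatency", 12L); // one observation, in ms
    source.updateQuantile("exampleLatency", 12L);  // accurate histogram variant
    source.removeMetric("exampleSize");            // stop announcing the gauge
  }
}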
diff --git hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSource.java hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSource.java
new file mode 100644
index 0000000..49cd4a6
--- /dev/null
+++ hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSource.java
@@ -0,0 +1,62 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.regionserver;
+
+
+/**
+ * This interface will be implemented to allow single regions to push metrics into
+ * MetricsRegionAggregateSource, which will in turn push data to the Hadoop metrics system.
+ */
+public interface MetricsRegionSource extends Comparable<MetricsRegionSource> {
+
+ /**
+ * Close the region's metrics as this region is closing.
+ */
+ void close();
+
+ /**
+ * Update related counts of puts.
+ */
+ void updatePut();
+
+ /**
+ * Update related counts of deletes.
+ */
+ void updateDelete();
+
+ /**
+ * Update related counts of gets.
+ */
+ void updateGet();
+
+ /**
+ * Update related counts of increments.
+ */
+ void updateIncrement();
+
+ /**
+ * Update related counts of appends.
+ */
+ void updateAppend();
+
+ /**
+ * Get the aggregate source to which this reports.
+ */
+ MetricsRegionAggregateSource getAggregateSource();
+}
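Aside (sketch, not in the patch): the expected per-region lifecycle, using the aggregate source defined in the next file; a region reports operations while open, then deregisters and closes its source when the region closes.

public class RegionSourceLifecycle {
  static void onOperations(org.apache.hadoop.hbase.regionserver.MetricsRegionSource rs) {
    rs.updatePut();     // one put against this region
    rs.updateGet();     // one get
    rs.updateDelete();  // one delete
  }

  static void onRegionClose(org.apache.hadoop.hbase.regionserver.MetricsRegionSource rs) {
    rs.getAggregateSource().deregister(rs); // drop from the aggregate first
    rs.close();                             // then close the region's metrics
  }
}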
diff --git hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionAggregateSource.java hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionAggregateSource.java
new file mode 100644
index 0000000..dad6e47
--- /dev/null
+++ hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionAggregateSource.java
@@ -0,0 +1,60 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.regionserver;
+
+import org.apache.hadoop.hbase.metrics.BaseSource;
+
+/**
+ * This interface will be implemented by a MetricsSource that will export metrics from
+ * multiple regions into the hadoop metrics system.
+ */
+public interface MetricsRegionAggregateSource extends BaseSource {
+
+ /**
+ * The name of the metrics
+ */
+ static final String METRICS_NAME = "Regions";
+
+ /**
+ * The name of the metrics context that metrics will be under.
+ */
+ static final String METRICS_CONTEXT = "regionserver";
+
+ /**
+ * Description
+ */
+ static final String METRICS_DESCRIPTION = "Metrics about HBase RegionServer regions and tables";
+
+ /**
+ * The name of the metrics context that metrics will be under in jmx
+ */
+ static final String METRICS_JMX_CONTEXT = "RegionServer,sub="+METRICS_NAME;
+
+ /**
+   * Register a MetricsRegionSource as being open.
+   * @param metricsRegionSource the source for the region being opened.
+   */
+  void register(MetricsRegionSource metricsRegionSource);
+
+  /**
+   * Remove a region's source. This is called when a region is closed.
+   * @param metricsRegionSource The region to remove.
+   */
+  void deregister(MetricsRegionSource metricsRegionSource);
+}
diff --git hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
new file mode 100644
index 0000000..d19ddba
--- /dev/null
+++ hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
@@ -0,0 +1,161 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.regionserver;
+
+import org.apache.hadoop.hbase.metrics.BaseSource;
+
+/**
+ * Interface for classes that expose metrics about the regionserver.
+ */
+public interface MetricsRegionServerSource extends BaseSource {
+
+ /**
+ * The name of the metrics
+ */
+ static final String METRICS_NAME = "Server";
+
+ /**
+ * The name of the metrics context that metrics will be under.
+ */
+ static final String METRICS_CONTEXT = "regionserver";
+
+ /**
+ * Description
+ */
+ static final String METRICS_DESCRIPTION = "Metrics about HBase RegionServer";
+
+ /**
+ * The name of the metrics context that metrics will be under in jmx
+ */
+ static final String METRICS_JMX_CONTEXT = "RegionServer,sub="+METRICS_NAME;
+
+ /**
+ * Update the Put time histogram
+ * @param t time it took
+ */
+ void updatePut(long t);
+
+ /**
+ * Update the Delete time histogram
+ * @param t time it took
+ */
+ void updateDelete(long t);
+
+ /**
+   * Update the Get time histogram.
+ * @param t time it took
+ */
+ void updateGet(long t);
+
+ /**
+ * Update the Increment time histogram.
+ * @param t time it took
+ */
+ void updateIncrement(long t);
+
+ /**
+ * Update the Append time histogram.
+ * @param t time it took
+ */
+ void updateAppend(long t);
+
+ // Strings used for exporting to metrics system.
+ static final String REGION_COUNT = "regionCount";
+ static final String REGION_COUNT_DESC = "Number of regions";
+ static final String STORE_COUNT = "storeCount";
+ static final String STORE_COUNT_DESC = "Number of Stores";
+ static final String STOREFILE_COUNT = "storeFileCount";
+ static final String STOREFILE_COUNT_DESC = "Number of Store Files";
+ static final String MEMSTORE_SIZE = "memStoreSize";
+ static final String MEMSTORE_SIZE_DESC = "Size of the memstore";
+ static final String STOREFILE_SIZE = "storeFileSize";
+ static final String STOREFILE_SIZE_DESC = "Size of storefiles being served.";
+ static final String TOTAL_REQUEST_COUNT = "totalRequestCount";
+ static final String TOTAL_REQUEST_COUNT_DESC =
+ "Total number of requests this RegionServer has answered.";
+ static final String READ_REQUEST_COUNT = "readRequestCount";
+ static final String READ_REQUEST_COUNT_DESC =
+ "Number of read requests this region server has answered.";
+ static final String WRITE_REQUEST_COUNT = "writeRequestCount";
+ static final String WRITE_REQUEST_COUNT_DESC =
+ "Number of mutation requests this region server has answered.";
+ static final String CHECK_MUTATE_FAILED_COUNT = "checkMutateFailedCount";
+ static final String CHECK_MUTATE_FAILED_COUNT_DESC =
+ "Number of Check and Mutate calls that failed the checks.";
+ static final String CHECK_MUTATE_PASSED_COUNT = "checkMutatePassedCount";
+ static final String CHECK_MUTATE_PASSED_COUNT_DESC =
+ "Number of Check and Mutate calls that passed the checks.";
+ static final String STOREFILE_INDEX_SIZE = "storeFileIndexSize";
+ static final String STOREFILE_INDEX_SIZE_DESC = "Size of indexes in storefiles on disk.";
+ static final String STATIC_INDEX_SIZE = "staticIndexSize";
+ static final String STATIC_INDEX_SIZE_DESC = "Uncompressed size of the static indexes.";
+ static final String STATIC_BLOOM_SIZE = "staticBloomSize";
+ static final String STATIC_BLOOM_SIZE_DESC =
+ "Uncompressed size of the static bloom filters.";
+ static final String NUMBER_OF_PUTS_WITHOUT_WAL = "putsWithoutWALCount";
+ static final String NUMBER_OF_PUTS_WITHOUT_WAL_DESC =
+ "Number of mutations that have been sent by clients with the write ahead logging turned off.";
+ static final String DATA_SIZE_WITHOUT_WAL = "putsWithoutWALSize";
+ static final String DATA_SIZE_WITHOUT_WAL_DESC =
+ "Size of data that has been sent by clients with the write ahead logging turned off.";
+ static final String PERCENT_FILES_LOCAL = "percentFilesLocal";
+ static final String PERCENT_FILES_LOCAL_DESC =
+ "The percent of HFiles that are stored on the local hdfs data node.";
+ static final String COMPACTION_QUEUE_LENGTH = "compactionQueueLength";
+ static final String COMPACTION_QUEUE_LENGTH_DESC = "Length of the queue for compactions.";
+ static final String FLUSH_QUEUE_LENGTH = "flushQueueLength";
+ static final String FLUSH_QUEUE_LENGTH_DESC = "Length of the queue for region flushes";
+ static final String BLOCK_CACHE_FREE_SIZE = "blockCacheFreeSize";
+ static final String BLOCK_CACHE_FREE_DESC =
+ "Size of the block cache that is not occupied.";
+ static final String BLOCK_CACHE_COUNT = "blockCacheCount";
+  static final String BLOCK_CACHE_COUNT_DESC = "Number of blocks in the block cache.";
+ static final String BLOCK_CACHE_SIZE = "blockCacheSize";
+ static final String BLOCK_CACHE_SIZE_DESC = "Size of the block cache.";
+ static final String BLOCK_CACHE_HIT_COUNT = "blockCacheHitCount";
+ static final String BLOCK_CACHE_HIT_COUNT_DESC = "Count of the hit on the block cache.";
+ static final String BLOCK_CACHE_MISS_COUNT = "blockCacheMissCount";
+  static final String BLOCK_CACHE_MISS_COUNT_DESC =
+      "Number of requests for a block that missed the block cache.";
+ static final String BLOCK_CACHE_EVICTION_COUNT = "blockCacheEvictionCount";
+ static final String BLOCK_CACHE_EVICTION_COUNT_DESC =
+ "Count of the number of blocks evicted from the block cache.";
+  static final String BLOCK_CACHE_HIT_PERCENT = "blockCacheHitPercent";
+ static final String BLOCK_CACHE_HIT_PERCENT_DESC =
+ "Percent of block cache requests that are hits";
+ static final String BLOCK_CACHE_EXPRESS_HIT_PERCENT = "blockCacheExpressHitPercent";
+ static final String BLOCK_CACHE_EXPRESS_HIT_PERCENT_DESC =
+ "The percent of the time that requests with the cache turned on hit the cache.";
+ static final String RS_START_TIME_NAME = "regionServerStartTime";
+ static final String ZOOKEEPER_QUORUM_NAME = "zookeeperQuorum";
+ static final String SERVER_NAME_NAME = "serverName";
+ static final String CLUSTER_ID_NAME = "clusterId";
+ static final String RS_START_TIME_DESC = "RegionServer Start Time";
+ static final String ZOOKEEPER_QUORUM_DESC = "Zookeeper Quorum";
+ static final String SERVER_NAME_DESC = "Server Name";
+ static final String CLUSTER_ID_DESC = "Cluster Id";
+ static final String UPDATES_BLOCKED_TIME = "updatesBlockedTime";
+  static final String UPDATES_BLOCKED_DESC =
+      "Number of milliseconds that updates have been blocked so that the memstore can be flushed.";
+ static final String DELETE_KEY = "delete";
+ static final String GET_KEY = "get";
+ static final String INCREMENT_KEY = "increment";
+ static final String PUT_KEY = "multiput";
+ static final String APPEND_KEY = "append";
+}
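Aside (illustration only): the update* methods above take a latency, so the natural call pattern is to time the operation and feed the histogram; the wrapper method is hypothetical.

public class TimedOperationExample {
  static void timedPut(org.apache.hadoop.hbase.regionserver.MetricsRegionServerSource s,
      Runnable put) {
    long start = System.currentTimeMillis();
    put.run();                                        // the actual put
    s.updatePut(System.currentTimeMillis() - start);  // record elapsed ms
  }
}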
diff --git hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceFactory.java hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceFactory.java
new file mode 100644
index 0000000..42f66d1
--- /dev/null
+++ hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceFactory.java
@@ -0,0 +1,39 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.regionserver;
+
+/**
+ * Interface of a factory to create the metrics sources used inside regionservers.
+ */
+public interface MetricsRegionServerSourceFactory {
+
+ /**
+   * Given a wrapper, create a MetricsRegionServerSource.
+ * @param regionServerWrapper The wrapped region server
+ * @return a Metrics Source.
+ */
+ MetricsRegionServerSource createGeneral(MetricsRegionServerWrapper regionServerWrapper);
+
+ /**
+   * Create a MetricsRegionSource from a MetricsRegionWrapper.
+   * @param wrapper The wrapped region.
+   * @return A metrics source for the region.
+   */
+  MetricsRegionSource createRegion(MetricsRegionWrapper wrapper);
+}
diff --git hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java
new file mode 100644
index 0000000..2bdb2b1
--- /dev/null
+++ hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java
@@ -0,0 +1,201 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.regionserver;
+
+/**
+ * This is the interface that will expose RegionServer information to hadoop1/hadoop2
+ * implementations of the MetricsRegionServerSource.
+ */
+public interface MetricsRegionServerWrapper {
+
+ /**
+ * Get ServerName
+ */
+  String getServerName();
+
+ /**
+ * Get the Cluster ID
+ * @return Cluster ID
+ */
+  String getClusterId();
+
+ /**
+ * Get the Zookeeper Quorum Info
+ * @return Zookeeper Quorum Info
+ */
+  String getZookeeperQuorum();
+
+ /**
+ * Get the co-processors
+ * @return Co-processors
+ */
+  String getCoprocessors();
+
+ /**
+ * Get HRegionServer start time
+ * @return Start time of RegionServer in milliseconds
+ */
+  long getRegionServerStartTime();
+
+ /**
+ * The number of online regions
+ */
+ long getNumOnlineRegions();
+
+ /**
+ * Get the number of stores hosted on this region server.
+ */
+ long getNumStores();
+
+ /**
+ * Get the number of store files hosted on this region server.
+ */
+ long getNumStoreFiles();
+
+ /**
+ * Get the size of the memstore on this region server.
+ */
+ long getMemstoreSize();
+
+ /**
+ * Get the total size of the store files this region server is serving from.
+ */
+ long getStoreFileSize();
+
+ /**
+ * Get the number of requests per second.
+ */
+ double getRequestsPerSecond();
+
+ /**
+   * Get the total number of requests this region server has answered.
+ */
+ long getTotalRequestCount();
+
+ /**
+ * Get the number of read requests to regions hosted on this region server.
+ */
+ long getReadRequestsCount();
+
+ /**
+ * Get the number of write requests to regions hosted on this region server.
+ */
+ long getWriteRequestsCount();
+
+ /**
+ * Get the number of CAS operations that failed.
+ */
+ long getCheckAndMutateChecksFailed();
+
+ /**
+ * Get the number of CAS operations that passed.
+ */
+ long getCheckAndMutateChecksPassed();
+
+ /**
+   * Get the size of indexes in storefiles on disk.
+ */
+ long getStoreFileIndexSize();
+
+ /**
+   * Get the size of the static indexes, including the roots.
+ */
+ long getTotalStaticIndexSize();
+
+ /**
+ * Get the size of the static bloom filters.
+ */
+ long getTotalStaticBloomSize();
+
+ /**
+ * Number of mutations received with WAL explicitly turned off.
+ */
+ long getNumPutsWithoutWAL();
+
+ /**
+   * Amount of data in the memstore but not in the WAL because mutations explicitly had their
+ * WAL turned off.
+ */
+ long getDataInMemoryWithoutWAL();
+
+ /**
+   * Get the percent of HFiles that are local.
+ */
+ int getPercentFileLocal();
+
+ /**
+ * Get the size of the compaction queue
+ */
+ int getCompactionQueueSize();
+
+ /**
+ * Get the size of the flush queue.
+ */
+ int getFlushQueueSize();
+
+ /**
+ * Get the size of the block cache that is free.
+ */
+ long getBlockCacheFreeSize();
+
+ /**
+ * Get the number of items in the block cache.
+ */
+ long getBlockCacheCount();
+
+ /**
+ * Get the total size of the block cache.
+ */
+ long getBlockCacheSize();
+
+ /**
+ * Get the count of hits to the block cache
+ */
+ long getBlockCacheHitCount();
+
+ /**
+ * Get the count of misses to the block cache.
+ */
+ long getBlockCacheMissCount();
+
+ /**
+ * Get the number of items evicted from the block cache.
+ */
+ long getBlockCacheEvictedCount();
+
+ /**
+ * Get the percent of all requests that hit the block cache.
+ */
+ int getBlockCacheHitPercent();
+
+ /**
+ * Get the percent of requests with the block cache turned on that hit the block cache.
+ */
+ int getBlockCacheHitCachingPercent();
+
+ /**
+ * Force a re-computation of the metrics.
+ */
+ void forceRecompute();
+
+ /**
+ * Get the amount of time that updates were blocked.
+ */
+ long getUpdatesBlockedTime();
+}
diff --git hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.java hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.java
new file mode 100644
index 0000000..6546089
--- /dev/null
+++ hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.java
@@ -0,0 +1,69 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.regionserver;
+
+/**
+ * Interface of a class that will wrap an HRegion and export numbers so they can be
+ * used in MetricsRegionSource.
+ */
+public interface MetricsRegionWrapper {
+
+ /**
+ * Get the name of the table the region belongs to.
+ * @return The string version of the table name.
+ */
+ String getTableName();
+
+ /**
+ * Get the name of the region.
+ * @return The encoded name of the region.
+ */
+ String getRegionName();
+
+ /**
+ * Get the number of stores hosted on this region server.
+ */
+ long getNumStores();
+
+ /**
+ * Get the number of store files hosted on this region server.
+ */
+ long getNumStoreFiles();
+
+ /**
+ * Get the size of the memstore on this region server.
+ */
+ long getMemstoreSize();
+
+ /**
+ * Get the total size of the store files this region server is serving from.
+ */
+ long getStoreFileSize();
+
+ /**
+ * Get the total number of read requests that have been issued against this region
+ */
+ long getReadRequestCount();
+
+ /**
+ * Get the total number of mutations that have been issued against this region.
+ */
+ long getWriteRequestCount();
+
+}
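Aside (hypothetical test double): a fixed-value MetricsRegionWrapper; a real implementation would read these numbers from a live HRegion.

public class StubMetricsRegionWrapper
    implements org.apache.hadoop.hbase.regionserver.MetricsRegionWrapper {
  @Override public String getTableName() { return "testTable"; }
  @Override public String getRegionName() { return "0123456789abcdef"; }
  @Override public long getNumStores() { return 1; }
  @Override public long getNumStoreFiles() { return 2; }
  @Override public long getMemstoreSize() { return 1024; }
  @Override public long getStoreFileSize() { return 4096; }
  @Override public long getReadRequestCount() { return 10; }
  @Override public long getWriteRequestCount() { return 5; }
}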
diff --git hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSource.java hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSource.java
new file mode 100644
index 0000000..0534ec8
--- /dev/null
+++ hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSource.java
@@ -0,0 +1,48 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.replication.regionserver;
+
+import org.apache.hadoop.hbase.metrics.BaseSource;
+
+/**
+ * Provides access to gauges and counters. Implementers will hide the details of hadoop1 or
+ * hadoop2's metrics2 classes and publishing.
+ */
+public interface MetricsReplicationSource extends BaseSource {
+ /**
+ * The name of the metrics
+ */
+ static final String METRICS_NAME = "ReplicationMetrics";
+
+ /**
+ * The name of the metrics context that metrics will be under.
+ */
+ static final String METRICS_CONTEXT = "hregionserver";
+
+ /**
+   * The name of the metrics context that metrics will be under in jmx.
+ */
+ static final String METRICS_JMX_CONTEXT = "HRegionServer,sub=Replication";
+
+ /**
+ * A description.
+ */
+ static final String METRICS_DESCRIPTION = "Metrics about HBase replication";
+
+}
diff --git hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/metrics/ReplicationMetricsSource.java hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/metrics/ReplicationMetricsSource.java
deleted file mode 100644
index 0090f49..0000000
--- hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/metrics/ReplicationMetricsSource.java
+++ /dev/null
@@ -1,48 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.replication.regionserver.metrics;
-
-import org.apache.hadoop.hbase.metrics.BaseMetricsSource;
-
-/**
- * Provides access to gauges and counters. Implementers will hide the details of hadoop1 or
- * hadoop2's metrics2 classes and publishing.
- */
-public interface ReplicationMetricsSource extends BaseMetricsSource {
- /**
- * The name of the metrics
- */
- static final String METRICS_NAME = "ReplicationMetrics";
-
- /**
- * The name of the metrics context that metrics will be under.
- */
- static final String METRICS_CONTEXT = "replicationmetrics";
-
- /**
- * The name of the metrics context that metrics will be under.
- */
- static final String METRICS_JMX_CONTEXT = "ReplicationMetrics";
-
- /**
- * A description.
- */
- static final String METRICS_DESCRIPTION = "Metrics about HBase replication";
-
-}
diff --git hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/rest/MetricsRESTSource.java hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/rest/MetricsRESTSource.java
new file mode 100644
index 0000000..ef61408
--- /dev/null
+++ hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/rest/MetricsRESTSource.java
@@ -0,0 +1,91 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest;
+
+import org.apache.hadoop.hbase.metrics.BaseSource;
+
+/**
+ * Interface of the Metrics Source that will export data to Hadoop's Metrics2 system.
+ */
+public interface MetricsRESTSource extends BaseSource {
+
+  static final String METRICS_NAME = "Rest";
+
+  static final String CONTEXT = "rest";
+
+  static final String JMX_CONTEXT = "Rest";
+
+  static final String METRICS_DESCRIPTION = "Metrics about the HBase REST server";
+
+  static final String REQUEST_KEY = "requests";
+
+  static final String SUCCESSFUL_GET_KEY = "successfulGet";
+
+  static final String SUCCESSFUL_PUT_KEY = "successfulPut";
+
+  static final String SUCCESSFUL_DELETE_KEY = "successfulDelete";
+
+  static final String FAILED_GET_KEY = "failedGet";
+
+  static final String FAILED_PUT_KEY = "failedPut";
+
+  static final String FAILED_DELETE_KEY = "failedDelete";
+
+ /**
+ * Increment the number of requests
+   * @param inc Amount to increment by.
+ */
+ void incrementRequests(int inc);
+
+ /**
+ * Increment the number of successful Get requests.
+ * @param inc Number of successful get requests.
+ */
+ void incrementSucessfulGetRequests(int inc);
+
+ /**
+ * Increment the number of successful Put requests.
+ * @param inc Number of successful put requests.
+ */
+ void incrementSucessfulPutRequests(int inc);
+
+ /**
+ * Increment the number of successful Delete requests.
+   * @param inc The number of successful Delete requests.
+ */
+ void incrementSucessfulDeleteRequests(int inc);
+
+ /**
+ * Increment the number of failed Put Requests.
+ * @param inc Number of failed Put requests.
+ */
+ void incrementFailedPutRequests(int inc);
+
+ /**
+ * Increment the number of failed Get requests.
+ * @param inc The number of failed Get Requests.
+ */
+ void incrementFailedGetRequests(int inc);
+
+ /**
+ * Increment the number of failed Delete requests.
+ * @param inc The number of failed delete requests.
+ */
+ void incrementFailedDeleteRequests(int inc);
+}
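Aside (sketch only): how a REST handler might record outcomes; the method names below intentionally match the interface's spelling.

public class RestMetricsUsage {
  static void recordGet(org.apache.hadoop.hbase.rest.MetricsRESTSource metrics,
      boolean succeeded) {
    metrics.incrementRequests(1);                // every call counts as a request
    if (succeeded) {
      metrics.incrementSucessfulGetRequests(1);  // success counter for GET
    } else {
      metrics.incrementFailedGetRequests(1);     // failure counter for GET
    }
  }
}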
diff --git hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/rest/metrics/RESTMetricsSource.java hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/rest/metrics/RESTMetricsSource.java
deleted file mode 100644
index 9d7f691..0000000
--- hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/rest/metrics/RESTMetricsSource.java
+++ /dev/null
@@ -1,91 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.rest.metrics;
-
-import org.apache.hadoop.hbase.metrics.BaseMetricsSource;
-
-/**
- * Interface of the Metrics Source that will export data to Hadoop's Metrics2 system.
- */
-public interface RESTMetricsSource extends BaseMetricsSource {
-
- public static String METRICS_NAME = "Rest";
-
- public static String CONTEXT = "rest";
-
- public static String JMX_CONTEXT = "Rest";
-
- public static String METRICS_DESCRIPTION = "Metrics about the HBase REST server";
-
- static String REQUEST_KEY = "requests";
-
- static String SUCCESSFUL_GET_KEY = "successfulGet";
-
- static String SUCCESSFUL_PUT_KEY = "successfulPut";
-
- static String SUCCESSFUL_DELETE_KEY = "successfulDelete";
-
- static String FAILED_GET_KEY = "failedGet";
-
- static String FAILED_PUT_KEY = "failedPut";
-
- static String FAILED_DELETE_KEY = "failedDelete";
-
- /**
- * Increment the number of requests
- * @param inc Ammount to increment by
- */
- void incrementRequests(int inc);
-
- /**
- * Increment the number of successful Get requests.
- * @param inc Number of successful get requests.
- */
- void incrementSucessfulGetRequests(int inc);
-
- /**
- * Increment the number of successful Put requests.
- * @param inc Number of successful put requests.
- */
- void incrementSucessfulPutRequests(int inc);
-
- /**
- * Increment the number of successful Delete requests.
- * @param inc
- */
- void incrementSucessfulDeleteRequests(int inc);
-
- /**
- * Increment the number of failed Put Requests.
- * @param inc Number of failed Put requests.
- */
- void incrementFailedPutRequests(int inc);
-
- /**
- * Increment the number of failed Get requests.
- * @param inc The number of failed Get Requests.
- */
- void incrementFailedGetRequests(int inc);
-
- /**
- * Increment the number of failed Delete requests.
- * @param inc The number of failed delete requests.
- */
- void incrementFailedDeleteRequests(int inc);
-}
diff --git hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSource.java hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSource.java
new file mode 100644
index 0000000..206154f
--- /dev/null
+++ hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSource.java
@@ -0,0 +1,78 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.thrift;
+
+import org.apache.hadoop.hbase.metrics.BaseSource;
+
+/**
+ * Interface of a class that will export metrics about Thrift to hadoop's metrics2.
+ */
+public interface MetricsThriftServerSource extends BaseSource {
+
+ static final String BATCH_GET_KEY = "batchGet";
+ static final String BATCH_MUTATE_KEY = "batchMutate";
+ static final String TIME_IN_QUEUE_KEY = "timeInQueue";
+ static final String THRIFT_CALL_KEY = "thriftCall";
+ static final String SLOW_THRIFT_CALL_KEY = "slowThriftCall";
+ static final String CALL_QUEUE_LEN_KEY = "callQueueLen";
+
+ /**
+ * Add how long an operation was in the queue.
+ * @param time Time the operation spent in the queue.
+ */
+ void incTimeInQueue(long time);
+
+ /**
+ * Set the call queue length.
+ * @param len Length of the call queue.
+ */
+ void setCallQueueLen(int len);
+
+ /**
+ * Add how many keys were in a batch get.
+ * @param diff Num Keys
+ */
+ void incNumRowKeysInBatchGet(int diff);
+
+ /**
+ * Add how many keys were in a batch mutate.
+ * @param diff Num Keys
+ */
+ void incNumRowKeysInBatchMutate(int diff);
+
+ /**
+ * Add how long a method took.
+ * @param name Method name
+ * @param time Time
+ */
+ void incMethodTime(String name, long time);
+
+ /**
+ * Add how long a call took.
+ * @param time Time
+ */
+ void incCall(long time);
+
+ /**
+ * Add how long a slow call took.
+ * @param time Time
+ */
+ void incSlowCall(long time);
+
+}
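
For context (not part of this patch), a minimal sketch of how a Thrift handler might feed this source; the wrapper class and the slow-call threshold below are hypothetical:

    import org.apache.hadoop.hbase.thrift.MetricsThriftServerSource;

    // Editor's sketch, not from this patch: report call timings through the source.
    public class TimedThriftHandler {
      private static final long SLOW_CALL_MS = 1000; // hypothetical slow-call cutoff

      private final MetricsThriftServerSource source;

      public TimedThriftHandler(MetricsThriftServerSource source) {
        this.source = source;
      }

      public void invoke(String methodName, Runnable call) {
        long start = System.currentTimeMillis();
        try {
          call.run();
        } finally {
          long duration = System.currentTimeMillis() - start;
          source.incCall(duration);                   // overall call latency
          source.incMethodTime(methodName, duration); // per-method latency
          if (duration > SLOW_CALL_MS) {
            source.incSlowCall(duration);             // count slow calls separately
          }
        }
      }
    }
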
diff --git hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceFactory.java hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceFactory.java
new file mode 100644
index 0000000..8fca2cf
--- /dev/null
+++ hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceFactory.java
@@ -0,0 +1,37 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.thrift;
+
+/** Factory that will be used to create metrics sources for the two different types of Thrift servers. */
+public interface MetricsThriftServerSourceFactory {
+
+ static final String METRICS_NAME = "Thrift";
+ static final String METRICS_DESCRIPTION = "Thrift Server Metrics";
+ static final String THRIFT_ONE_METRICS_CONTEXT = "thrift-one";
+ static final String THRIFT_ONE_JMX_CONTEXT = "Thrift,sub=ThriftOne";
+ static final String THRIFT_TWO_METRICS_CONTEXT = "thrift-two";
+ static final String THRIFT_TWO_JMX_CONTEXT = "Thrift,sub=ThriftTwo";
+
+ /** Create a Source for a thrift one server */
+ MetricsThriftServerSource createThriftOneSource();
+
+ /** Create a Source for a thrift two server */
+ MetricsThriftServerSource createThriftTwoSource();
+
+}
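
As a usage note (not part of this patch), callers are expected to resolve this factory through CompatibilitySingletonFactory, roughly as follows; the bootstrap class name is hypothetical:

    import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
    import org.apache.hadoop.hbase.thrift.MetricsThriftServerSource;
    import org.apache.hadoop.hbase.thrift.MetricsThriftServerSourceFactory;

    // Editor's sketch: if no compat implementation jar is on the classpath,
    // getInstance throws a RuntimeException, which the tests below rely on.
    public class ThriftMetricsBootstrap {
      public static MetricsThriftServerSource createThriftOneMetrics() {
        MetricsThriftServerSourceFactory factory =
            CompatibilitySingletonFactory.getInstance(MetricsThriftServerSourceFactory.class);
        return factory.createThriftOneSource();
      }
    }
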
diff --git hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/thrift/metrics/ThriftServerMetricsSource.java hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/thrift/metrics/ThriftServerMetricsSource.java
deleted file mode 100644
index f6ba023..0000000
--- hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/thrift/metrics/ThriftServerMetricsSource.java
+++ /dev/null
@@ -1,78 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.thrift.metrics;
-
-import org.apache.hadoop.hbase.metrics.BaseMetricsSource;
-
-/**
- * Inteface of a class that will export metrics about Thrift to hadoop's metrics2.
- */
-public interface ThriftServerMetricsSource extends BaseMetricsSource {
-
- static final String BATCH_GET_KEY = "batchGet";
- static final String BATCH_MUTATE_KEY = "batchMutate";
- static final String TIME_IN_QUEUE_KEY = "timeInQueue";
- static final String THRIFT_CALL_KEY = "thriftCall";
- static final String SLOW_THRIFT_CALL_KEY = "slowThriftCall";
- static final String CALL_QUEUE_LEN_KEY = "callQueueLen";
-
- /**
- * Add how long an operation was in the queue.
- * @param time
- */
- void incTimeInQueue(long time);
-
- /**
- * Set the call queue length.
- * @param len Time
- */
- void setCallQueueLen(int len);
-
- /**
- * Add how many keys were in a batch get.
- * @param diff Num Keys
- */
- void incNumRowKeysInBatchGet(int diff);
-
- /**
- * Add how many keys were in a batch mutate.
- * @param diff Num Keys
- */
- void incNumRowKeysInBatchMutate(int diff);
-
- /**
- * Add how long a method took
- * @param name Method name
- * @param time Time
- */
- void incMethodTime(String name, long time);
-
- /**
- * Add how long a call took
- * @param time Time
- */
- void incCall(long time);
-
- /**
- * Increment how long a slow call took.
- * @param time Time
- */
- void incSlowCall(long time);
-
-}
diff --git hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/thrift/metrics/ThriftServerMetricsSourceFactory.java hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/thrift/metrics/ThriftServerMetricsSourceFactory.java
deleted file mode 100644
index be6b5f9..0000000
--- hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/thrift/metrics/ThriftServerMetricsSourceFactory.java
+++ /dev/null
@@ -1,35 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.thrift.metrics;
-
-/** Factory that will be used to create metrics sources for the two diffent types of thrift servers. */
-public interface ThriftServerMetricsSourceFactory {
-
- static final String METRICS_NAME = "Thrift";
- static final String METRICS_DESCRIPTION = "Thrift Server Metrics";
- static final String THRIFT_ONE_METRICS_CONTEXT = "thrift-one";
- static final String THRIFT_ONE_JMX_CONTEXT = "Thrift,sub=ThriftOne";
- static final String THRIFT_TWO_METRICS_CONTEXT = "thrift-two";
- static final String THRIFT_TWO_JMX_CONTEXT = "Thrift,sub=ThriftTwo";
-
- ThriftServerMetricsSource createThriftOneSource();
-
- ThriftServerMetricsSource createThriftTwoSource();
-
-}
diff --git hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics/MetricHistogram.java hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics/MetricHistogram.java
deleted file mode 100644
index c5d6e49..0000000
--- hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics/MetricHistogram.java
+++ /dev/null
@@ -1,37 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.metrics;
-
-/**
- *
- */
-public interface MetricHistogram {
-
- static final String NUM_OPS_METRIC_NAME = "_num_ops";
- static final String MIN_METRIC_NAME = "_min";
- static final String MAX_METRIC_NAME = "_max";
- static final String MEAN_METRIC_NAME = "_mean";
- static final String MEDIAN_METRIC_NAME = "_median";
- static final String SEVENTY_FIFTH_PERCENTILE_METRIC_NAME = "_75th_percentile";
- static final String NINETY_FIFTH_PERCENTILE_METRIC_NAME = "_95th_percentile";
- static final String NINETY_NINETH_PERCENTILE_METRIC_NAME = "_99th_percentile";
-
- void add(long value);
-
-}
diff --git hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics/MetricsExecutor.java hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics/MetricsExecutor.java
deleted file mode 100644
index 4094922..0000000
--- hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics/MetricsExecutor.java
+++ /dev/null
@@ -1,32 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.metrics;
-
-import java.util.concurrent.ScheduledExecutorService;
-
-/**
- *
- */
-public interface MetricsExecutor {
-
- ScheduledExecutorService getExecutor();
-
- void stop();
-
-}
diff --git hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/MetricHistogram.java hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/MetricHistogram.java
new file mode 100644
index 0000000..f431632
--- /dev/null
+++ hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/MetricHistogram.java
@@ -0,0 +1,43 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.metrics2;
+
+/**
+ * Metrics Histogram interface. Implementing classes will expose computed
+ * percentile values through the metrics system.
+ */
+public interface MetricHistogram {
+
+ //Strings used to create metrics names.
+ static final String NUM_OPS_METRIC_NAME = "_num_ops";
+ static final String MIN_METRIC_NAME = "_min";
+ static final String MAX_METRIC_NAME = "_max";
+ static final String MEAN_METRIC_NAME = "_mean";
+ static final String MEDIAN_METRIC_NAME = "_median";
+ static final String SEVENTY_FIFTH_PERCENTILE_METRIC_NAME = "_75th_percentile";
+ static final String NINETY_FIFTH_PERCENTILE_METRIC_NAME = "_95th_percentile";
+ static final String NINETY_NINETH_PERCENTILE_METRIC_NAME = "_99th_percentile";
+
+ /**
+ * Add a single value to a histogram's stream of values.
+ * @param value The value to add to the histogram's stream.
+ */
+ void add(long value);
+
+}
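
For illustration (not part of this patch), the suffix constants are presumably appended to a histogram's base name when values are exported, so a histogram named "hlogSplitTime" would publish "hlogSplitTime_num_ops", "hlogSplitTime_95th_percentile", and so on:

    import org.apache.hadoop.metrics2.MetricHistogram;

    // Editor's sketch: derive the exported metric names for a histogram.
    public class HistogramNames {
      public static String numOpsName(String baseName) {
        return baseName + MetricHistogram.NUM_OPS_METRIC_NAME; // e.g. "hlogSplitTime_num_ops"
      }

      public static String p95Name(String baseName) {
        return baseName + MetricHistogram.NINETY_FIFTH_PERCENTILE_METRIC_NAME;
      }
    }
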
diff --git hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/MetricsExecutor.java hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/MetricsExecutor.java
new file mode 100644
index 0000000..f2ebc94
--- /dev/null
+++ hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/MetricsExecutor.java
@@ -0,0 +1,32 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.metrics2;
+
+import java.util.concurrent.ScheduledExecutorService;
+
+/**
+ * ScheduledExecutorService for metrics.
+ */
+public interface MetricsExecutor {
+
+ ScheduledExecutorService getExecutor();
+
+ void stop();
+
+}
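
For illustration (not part of this patch), a consumer might schedule recurring metrics work on the wrapped executor; the task body and period below are hypothetical:

    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.metrics2.MetricsExecutor;

    // Editor's sketch: schedule a periodic task on the shared metrics executor.
    public class PeriodicMetricsTask {
      public void start(MetricsExecutor metricsExecutor) {
        metricsExecutor.getExecutor().scheduleWithFixedDelay(new Runnable() {
          @Override
          public void run() {
            // recompute quantile snapshots here (hypothetical work)
          }
        }, 10, 10, TimeUnit.SECONDS);
      }
    }
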
diff --git hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/master/TestMasterMetricsSourceFactory.java hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/master/TestMasterMetricsSourceFactory.java
new file mode 100644
index 0000000..43d4f6f
--- /dev/null
+++ hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/master/TestMasterMetricsSourceFactory.java
@@ -0,0 +1,36 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.master;
+
+import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
+import org.apache.hadoop.hbase.master.MetricsMasterSource;
+import org.junit.Test;
+
+/**
+ * Test for the CompatibilitySingletonFactory and building MetricsMasterSourceFactory
+ */
+public class TestMasterMetricsSourceFactory {
+
+ @Test(expected=RuntimeException.class)
+ public void testGetInstanceNoHadoopCompat() throws Exception {
+ //This should throw an exception because there is no compat lib on the class path.
+ CompatibilitySingletonFactory.getInstance(MetricsMasterSourceFactory.class);
+
+ }
+}
diff --git hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/master/metrics/TestMasterMetricsSourceFactory.java hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/master/metrics/TestMasterMetricsSourceFactory.java
deleted file mode 100644
index 9f28c49..0000000
--- hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/master/metrics/TestMasterMetricsSourceFactory.java
+++ /dev/null
@@ -1,35 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.master.metrics;
-
-import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
-import org.junit.Test;
-
-/**
- * Test for the CompatibilitySingletonFactory and building MasterMetricsSource
- */
-public class TestMasterMetricsSourceFactory {
-
- @Test(expected=RuntimeException.class)
- public void testGetInstanceNoHadoopCompat() throws Exception {
- //This should throw an exception because there is no compat lib on the class path.
- CompatibilitySingletonFactory.getInstance(MasterMetricsSource.class);
-
- }
-}
diff --git hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetricsSourceFactory.java hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetricsSourceFactory.java
new file mode 100644
index 0000000..1b62fc7
--- /dev/null
+++ hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetricsSourceFactory.java
@@ -0,0 +1,36 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.regionserver;
+
+import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
+import org.apache.hadoop.hbase.regionserver.MetricsRegionServerSourceFactory;
+import org.junit.Test;
+
+/**
+ * Test for the CompatibilitySingletonFactory and building MetricsRegionServerSourceFactory
+ */
+public class TestRegionServerMetricsSourceFactory {
+
+ @Test(expected=RuntimeException.class)
+ public void testGetInstanceNoHadoopCompat() throws Exception {
+ //This should throw an exception because there is no compat lib on the class path.
+ CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class);
+
+ }
+}
diff --git hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationMetricsSourceFactory.java hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationMetricsSourceFactory.java
new file mode 100644
index 0000000..c76f7f4
--- /dev/null
+++ hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationMetricsSourceFactory.java
@@ -0,0 +1,35 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.replication.regionserver;
+
+import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
+import org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSource;
+import org.junit.Test;
+
+/**
+ * Test for the CompatibilitySingletonFactory and building MetricsReplicationSource
+ */
+public class TestReplicationMetricsSourceFactory {
+
+ @Test(expected=RuntimeException.class)
+ public void testGetInstanceNoHadoopCompat() throws Exception {
+ //This should throw an exception because there is no compat lib on the class path.
+ CompatibilitySingletonFactory.getInstance(MetricsReplicationSource.class);
+ }
+}
diff --git hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/metrics/TestReplicationMetricsSourceFactory.java hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/metrics/TestReplicationMetricsSourceFactory.java
deleted file mode 100644
index 9378dff..0000000
--- hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/metrics/TestReplicationMetricsSourceFactory.java
+++ /dev/null
@@ -1,34 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.replication.regionserver.metrics;
-
-import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
-import org.junit.Test;
-
-/**
- * Test for the CompatibilitySingletonFactory and building ReplicationMetricsSource
- */
-public class TestReplicationMetricsSourceFactory {
-
- @Test(expected=RuntimeException.class)
- public void testGetInstanceNoHadoopCompat() throws Exception {
- //This should throw an exception because there is no compat lib on the class path.
- CompatibilitySingletonFactory.getInstance(ReplicationMetricsSource.class);
- }
-}
diff --git hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/rest/TestRESTMetricsSource.java hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/rest/TestRESTMetricsSource.java
new file mode 100644
index 0000000..adaf56f
--- /dev/null
+++ hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/rest/TestRESTMetricsSource.java
@@ -0,0 +1,37 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest;
+
+import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
+import org.apache.hadoop.hbase.rest.MetricsRESTSource;
+import org.junit.Test;
+
+/**
+ * Test of the REST metrics source interface.
+ */
+public class TestRESTMetricsSource {
+
+
+ @Test(expected=RuntimeException.class)
+ public void testGetInstanceNoHadoopCompat() throws Exception {
+ //This should throw an exception because there is no compat lib on the class path.
+ CompatibilitySingletonFactory.getInstance(MetricsRESTSource.class);
+ }
+
+}
diff --git hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/rest/metrics/TestRESTMetricsSource.java hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/rest/metrics/TestRESTMetricsSource.java
deleted file mode 100644
index e3f18f7..0000000
--- hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/rest/metrics/TestRESTMetricsSource.java
+++ /dev/null
@@ -1,36 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.rest.metrics;
-
-import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
-import org.junit.Test;
-
-/**
- * Test of Rest Metrics Source interface.
- */
-public class TestRESTMetricsSource {
-
-
- @Test(expected=RuntimeException.class)
- public void testGetInstanceNoHadoopCompat() throws Exception {
- //This should throw an exception because there is no compat lib on the class path.
- CompatibilitySingletonFactory.getInstance(RESTMetricsSource.class);
- }
-
-}
diff --git hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/test/MetricsAssertHelper.java hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/test/MetricsAssertHelper.java
index fc668bf..4f0b3b7 100644
--- hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/test/MetricsAssertHelper.java
+++ hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/test/MetricsAssertHelper.java
@@ -18,7 +18,7 @@
package org.apache.hadoop.hbase.test;
-import org.apache.hadoop.hbase.metrics.BaseMetricsSource;
+import org.apache.hadoop.hbase.metrics.BaseSource;
/** Interface of a class to make assertions about metrics values. */
public interface MetricsAssertHelper {
@@ -28,128 +28,128 @@ public interface MetricsAssertHelper {
*
* @param name The name of the tag.
* @param expected The expected value
- * @param source The BaseMetricsSource{@link BaseMetricsSource} that will provide the tags,
+ * @param source The {@link org.apache.hadoop.hbase.metrics.BaseSource} that will provide the tags,
* gauges, and counters.
*/
- public void assertTag(String name, String expected, BaseMetricsSource source);
+ public void assertTag(String name, String expected, BaseSource source);
/**
* Assert that a gauge exists and that it's value is equal to the expected value.
*
* @param name The name of the gauge
* @param expected The expected value of the gauge.
- * @param source The BaseMetricsSource{@link BaseMetricsSource} that will provide the tags,
+ * @param source The {@link org.apache.hadoop.hbase.metrics.BaseSource} that will provide the tags,
* gauges, and counters.
*/
- public void assertGauge(String name, long expected, BaseMetricsSource source);
+ public void assertGauge(String name, long expected, BaseSource source);
/**
* Assert that a gauge exists and it's value is greater than a given value
*
* @param name The name of the gauge
* @param expected Value that the gauge is expected to be greater than
- * @param source The BaseMetricsSource{@link BaseMetricsSource} that will provide the tags,
+ * @param source The {@link org.apache.hadoop.hbase.metrics.BaseSource} that will provide the tags,
* gauges, and counters.
*/
- public void assertGaugeGt(String name, long expected, BaseMetricsSource source);
+ public void assertGaugeGt(String name, long expected, BaseSource source);
/**
* Assert that a gauge exists and it's value is less than a given value
*
* @param name The name of the gauge
* @param expected Value that the gauge is expected to be less than
- * @param source The BaseMetricsSource{@link BaseMetricsSource} that will provide the tags,
+ * @param source The {@link org.apache.hadoop.hbase.metrics.BaseSource} that will provide the tags,
* gauges, and counters.
*/
- public void assertGaugeLt(String name, long expected, BaseMetricsSource source);
+ public void assertGaugeLt(String name, long expected, BaseSource source);
/**
* Assert that a gauge exists and that it's value is equal to the expected value.
*
* @param name The name of the gauge
* @param expected The expected value of the gauge.
- * @param source The BaseMetricsSource{@link BaseMetricsSource} that will provide the tags,
+ * @param source The {@link org.apache.hadoop.hbase.metrics.BaseSource} that will provide the tags,
* gauges, and counters.
*/
- public void assertGauge(String name, double expected, BaseMetricsSource source);
+ public void assertGauge(String name, double expected, BaseSource source);
/**
* Assert that a gauge exists and it's value is greater than a given value
*
* @param name The name of the gauge
* @param expected Value that the gauge is expected to be greater than
- * @param source The BaseMetricsSource{@link BaseMetricsSource} that will provide the tags,
+ * @param source The {@link org.apache.hadoop.hbase.metrics.BaseSource} that will provide the tags,
* gauges, and counters.
*/
- public void assertGaugeGt(String name, double expected, BaseMetricsSource source);
+ public void assertGaugeGt(String name, double expected, BaseSource source);
/**
* Assert that a gauge exists and it's value is less than a given value
*
* @param name The name of the gauge
* @param expected Value that the gauge is expected to be less than
- * @param source The BaseMetricsSource{@link BaseMetricsSource} that will provide the tags,
+ * @param source The {@link org.apache.hadoop.hbase.metrics.BaseSource} that will provide the tags,
* gauges, and counters.
*/
- public void assertGaugeLt(String name, double expected, BaseMetricsSource source);
+ public void assertGaugeLt(String name, double expected, BaseSource source);
/**
* Assert that a counter exists and that it's value is equal to the expected value.
*
* @param name The name of the counter.
* @param expected The expected value
- * @param source The BaseMetricsSource{@link BaseMetricsSource} that will provide the tags,
+ * @param source The {@link org.apache.hadoop.hbase.metrics.BaseSource} that will provide the tags,
* gauges, and counters.
*/
- public void assertCounter(String name, long expected, BaseMetricsSource source);
+ public void assertCounter(String name, long expected, BaseSource source);
/**
* Assert that a counter exists and that it's value is greater than the given value.
*
* @param name The name of the counter.
* @param expected The value the counter is expected to be greater than.
- * @param source The BaseMetricsSource{@link BaseMetricsSource} that will provide the tags,
+ * @param source The {@link org.apache.hadoop.hbase.metrics.BaseSource} that will provide the tags,
* gauges, and counters.
*/
- public void assertCounterGt(String name, long expected, BaseMetricsSource source);
+ public void assertCounterGt(String name, long expected, BaseSource source);
/**
* Assert that a counter exists and that it's value is less than the given value.
*
* @param name The name of the counter.
* @param expected The value the counter is expected to be less than.
- * @param source The BaseMetricsSource{@link BaseMetricsSource} that will provide the tags,
+ * @param source The {@link org.apache.hadoop.hbase.metrics.BaseSource} that will provide the tags,
* gauges, and counters.
*/
- public void assertCounterLt(String name, long expected, BaseMetricsSource source);
+ public void assertCounterLt(String name, long expected, BaseSource source);
/**
* Get the value of a counter.
*
* @param name name of the counter.
- * @param source The BaseMetricsSource{@link BaseMetricsSource} that will provide the tags,
+ * @param source The {@link org.apache.hadoop.hbase.metrics.BaseSource} that will provide the tags,
* gauges, and counters.
* @return long value of the counter.
*/
- public long getCounter(String name, BaseMetricsSource source);
+ public long getCounter(String name, BaseSource source);
/**
* Get the value of a gauge as a double.
*
* @param name name of the gauge.
- * @param source The BaseMetricsSource{@link BaseMetricsSource} that will provide the tags,
+ * @param source The {@link org.apache.hadoop.hbase.metrics.BaseSource} that will provide the tags,
* gauges, and counters.
* @return double value of the gauge.
*/
- public double getGaugeDouble(String name, BaseMetricsSource source);
+ public double getGaugeDouble(String name, BaseSource source);
/**
* Get the value of a gauge as a long.
*
* @param name name of the gauge.
- * @param source The BaseMetricsSource{@link BaseMetricsSource} that will provide the tags,
+ * @param source The {@link org.apache.hadoop.hbase.metrics.BaseSource} that will provide the tags,
* gauges, and counters.
* @return long value of the gauge.
*/
- public long getGaugeLong(String name, BaseMetricsSource source);
+ public long getGaugeLong(String name, BaseSource source);
}
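
As a usage note (not part of this patch), a compat-module test would typically resolve the helper through CompatibilitySingletonFactory and assert against a source; the metric names below are taken from constants in this patch:

    import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
    import org.apache.hadoop.hbase.metrics.BaseSource;
    import org.apache.hadoop.hbase.test.MetricsAssertHelper;

    // Editor's sketch: verify counters and gauges exposed by a BaseSource.
    public class ExampleMetricsAssertions {
      private final MetricsAssertHelper helper =
          CompatibilitySingletonFactory.getInstance(MetricsAssertHelper.class);

      public void verify(BaseSource source) {
        helper.assertCounter("clusterRequests", 1, source);
        helper.assertGaugeGt("masterStartTime", 0, source);
      }
    }
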
diff --git hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftServerMetricsSourceFactory.java hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftServerMetricsSourceFactory.java
new file mode 100644
index 0000000..2d7f17d
--- /dev/null
+++ hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftServerMetricsSourceFactory.java
@@ -0,0 +1,37 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.thrift;
+
+import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
+import org.apache.hadoop.hbase.thrift.MetricsThriftServerSourceFactory;
+import org.junit.Test;
+
+/**
+ * Test for the interface of MetricsThriftServerSourceFactory
+ */
+public class TestThriftServerMetricsSourceFactory {
+
+
+ @Test(expected=RuntimeException.class)
+ public void testGetInstanceNoHadoopCompat() throws RuntimeException {
+ //This should throw an exception because there is no compat lib on the class path.
+ CompatibilitySingletonFactory.getInstance(MetricsThriftServerSourceFactory.class);
+ }
+
+}
diff --git hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/thrift/metrics/TestThriftServerMetricsSourceFactory.java hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/thrift/metrics/TestThriftServerMetricsSourceFactory.java
deleted file mode 100644
index b1f253e..0000000
--- hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/thrift/metrics/TestThriftServerMetricsSourceFactory.java
+++ /dev/null
@@ -1,36 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.thrift.metrics;
-
-import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
-import org.junit.Test;
-
-/**
- * Test for the interface of ThriftServerMetricsSourceFactory
- */
-public class TestThriftServerMetricsSourceFactory {
-
-
- @Test(expected=RuntimeException.class)
- public void testGetInstanceNoHadoopCompat() throws RuntimeException {
- //This should throw an exception because there is no compat lib on the class path.
- CompatibilitySingletonFactory.getInstance(ThriftServerMetricsSourceFactory.class);
- }
-
-}
diff --git hbase-hadoop1-compat/pom.xml hbase-hadoop1-compat/pom.xml
index eacde23..6b55471 100644
--- hbase-hadoop1-compat/pom.xml
+++ hbase-hadoop1-compat/pom.xml
@@ -98,6 +98,10 @@ limitations under the License.
       <artifactId>metrics-core</artifactId>
     </dependency>
+    <dependency>
+      <groupId>log4j</groupId>
+      <artifactId>log4j</artifactId>
+    </dependency>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-test</artifactId>
       <version>${hadoop-one.version}</version>
diff --git hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceFactoryImpl.java hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceFactoryImpl.java
new file mode 100644
index 0000000..6cb3f43
--- /dev/null
+++ hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceFactoryImpl.java
@@ -0,0 +1,37 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.master;
+
+/**
+ * Factory to create MetricsMasterSource when given a MetricsMasterWrapper
+ */
+public class MetricsMasterSourceFactoryImpl implements MetricsMasterSourceFactory {
+ private static enum FactoryStorage {
+ INSTANCE;
+ MetricsMasterSource masterSource;
+ }
+
+ @Override
+ public synchronized MetricsMasterSource create(MetricsMasterWrapper masterWrapper) {
+ if (FactoryStorage.INSTANCE.masterSource == null ) {
+ FactoryStorage.INSTANCE.masterSource = new MetricsMasterSourceImpl(masterWrapper);
+ }
+ return FactoryStorage.INSTANCE.masterSource;
+ }
+}
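
Design note (not part of this patch): FactoryStorage uses the enum-singleton idiom, so the JVM guarantees a single INSTANCE per classloader and the synchronized create() hands every caller the same cached source. A generic sketch of the idiom:

    // Editor's sketch of the enum-holder singleton idiom used above.
    public class CachedResource {
      private static enum Holder {
        INSTANCE;
        CachedResource resource; // lazily created under the accessor's lock
      }

      public static synchronized CachedResource get() {
        if (Holder.INSTANCE.resource == null) {
          Holder.INSTANCE.resource = new CachedResource();
        }
        return Holder.INSTANCE.resource;
      }
    }
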
diff --git hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceImpl.java hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceImpl.java
new file mode 100644
index 0000000..1397f79
--- /dev/null
+++ hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceImpl.java
@@ -0,0 +1,131 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.master;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.metrics.BaseSourceImpl;
+import org.apache.hadoop.metrics2.MetricsBuilder;
+import org.apache.hadoop.metrics2.MetricsRecordBuilder;
+import org.apache.hadoop.metrics2.lib.MetricMutableCounterLong;
+import org.apache.hadoop.metrics2.lib.MetricMutableGaugeLong;
+import org.apache.hadoop.metrics2.lib.MetricMutableHistogram;
+
+/** Hadoop1 implementation of MetricsMasterSource. */
+public class MetricsMasterSourceImpl
+ extends BaseSourceImpl implements MetricsMasterSource {
+
+ private static final Log LOG = LogFactory.getLog(MetricsMasterSourceImpl.class.getName());
+
+ MetricMutableCounterLong clusterRequestsCounter;
+ MetricMutableGaugeLong ritGauge;
+ MetricMutableGaugeLong ritCountOverThresholdGauge;
+ MetricMutableGaugeLong ritOldestAgeGauge;
+
+ private final MetricsMasterWrapper masterWrapper;
+ private MetricMutableHistogram splitTimeHisto;
+ private MetricMutableHistogram splitSizeHisto;
+
+ public MetricsMasterSourceImpl(MetricsMasterWrapper masterWrapper) {
+ this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT, masterWrapper);
+ }
+
+ public MetricsMasterSourceImpl(String metricsName,
+ String metricsDescription,
+ String metricsContext,
+ String metricsJmxContext,
+ MetricsMasterWrapper masterWrapper) {
+ super(metricsName, metricsDescription, metricsContext, metricsJmxContext);
+ this.masterWrapper = masterWrapper;
+ }
+
+ @Override
+ public void init() {
+ super.init();
+ clusterRequestsCounter = metricsRegistry.newCounter(CLUSTER_REQUESTS_NAME, "", 0l);
+ ritGauge = metricsRegistry.newGauge(RIT_COUNT_NAME, "", 0l);
+ ritCountOverThresholdGauge = metricsRegistry.newGauge(RIT_COUNT_OVER_THRESHOLD_NAME, "", 0l);
+ ritOldestAgeGauge = metricsRegistry.newGauge(RIT_OLDEST_AGE_NAME, "", 0l);
+ splitTimeHisto = metricsRegistry.newHistogram(SPLIT_TIME_NAME, SPLIT_TIME_DESC);
+ splitSizeHisto = metricsRegistry.newHistogram(SPLIT_SIZE_NAME, SPLIT_SIZE_DESC);
+ }
+
+ public void incRequests(final int inc) {
+ this.clusterRequestsCounter.incr(inc);
+ }
+
+ public void setRIT(int ritCount) {
+ ritGauge.set(ritCount);
+ }
+
+ public void setRITCountOverThreshold(int ritCount) {
+ ritCountOverThresholdGauge.set(ritCount);
+ }
+
+ public void setRITOldestAge(long ritCount) {
+ ritOldestAgeGauge.set(ritCount);
+ }
+
+ @Override
+ public void updateSplitTime(long time) {
+ splitTimeHisto.add(time);
+ }
+
+ @Override
+ public void updateSplitSize(long size) {
+ splitSizeHisto.add(size);
+ }
+
+ /**
+ * Method to export all the metrics.
+ *
+ * @param metricsBuilder Builder to accept metrics
+ * @param all push all or only changed?
+ */
+ @Override
+ public void getMetrics(MetricsBuilder metricsBuilder, boolean all) {
+
+ MetricsRecordBuilder metricsRecordBuilder = metricsBuilder.addRecord(metricsName)
+ .setContext(metricsContext);
+
+ // masterWrapper can be null because this function is called inside of init.
+ if (masterWrapper != null) {
+ metricsRecordBuilder
+ .addGauge(MASTER_ACTIVE_TIME_NAME,
+ MASTER_ACTIVE_TIME_DESC, masterWrapper.getMasterActiveTime())
+ .addGauge(MASTER_START_TIME_NAME,
+ MASTER_START_TIME_DESC, masterWrapper.getMasterStartTime())
+ .addGauge(AVERAGE_LOAD_NAME, AVERAGE_LOAD_DESC, masterWrapper.getAverageLoad())
+ .addGauge(NUM_REGION_SERVERS_NAME,
+ NUMBER_OF_REGION_SERVERS_DESC, masterWrapper.getRegionServers())
+ .addGauge(NUM_DEAD_REGION_SERVERS_NAME,
+ NUMBER_OF_DEAD_REGION_SERVERS_DESC,
+ masterWrapper.getDeadRegionServers())
+ .tag(ZOOKEEPER_QUORUM_NAME, ZOOKEEPER_QUORUM_DESC, masterWrapper.getZookeeperQuorum())
+ .tag(SERVER_NAME_NAME, SERVER_NAME_DESC, masterWrapper.getServerName())
+ .tag(CLUSTER_ID_NAME, CLUSTER_ID_DESC, masterWrapper.getClusterId())
+ .tag(IS_ACTIVE_MASTER_NAME,
+ IS_ACTIVE_MASTER_DESC,
+ String.valueOf(masterWrapper.getIsActiveMaster()));
+ }
+
+ metricsRegistry.snapshot(metricsRecordBuilder, all);
+ }
+
+}
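
For context (not part of this patch), master-side code would feed this source roughly as follows; the reporter class and its call site are hypothetical:

    import org.apache.hadoop.hbase.master.MetricsMasterSource;

    // Editor's sketch: record log-split results into the two histograms.
    public class LogSplitReporter {
      private final MetricsMasterSource source;

      public LogSplitReporter(MetricsMasterSource source) {
        this.source = source;
      }

      public void onSplitFinished(long durationMs, long bytesSplit) {
        source.updateSplitTime(durationMs); // feeds the hlogSplitTime histogram
        source.updateSplitSize(bytesSplit); // feeds the hlogSplitSize histogram
      }
    }
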
diff --git hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetricsSourceFactoryImpl.java hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetricsSourceFactoryImpl.java
deleted file mode 100644
index 4a17046..0000000
--- hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetricsSourceFactoryImpl.java
+++ /dev/null
@@ -1,37 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.master.metrics;
-
-/**
- * Factory to create MasterMetricsSource when given a MasterMetricsWrapper
- */
-public class MasterMetricsSourceFactoryImpl implements MasterMetricsSourceFactory {
- private static enum FactoryStorage {
- INSTANCE;
- MasterMetricsSource source;
- }
-
- @Override
- public synchronized MasterMetricsSource create(MasterMetricsWrapper beanWrapper) {
- if (FactoryStorage.INSTANCE.source == null ) {
- FactoryStorage.INSTANCE.source = new MasterMetricsSourceImpl(beanWrapper);
- }
- return FactoryStorage.INSTANCE.source;
- }
-}
diff --git hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetricsSourceImpl.java hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetricsSourceImpl.java
deleted file mode 100644
index 85c7373..0000000
--- hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetricsSourceImpl.java
+++ /dev/null
@@ -1,131 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.master.metrics;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.metrics.BaseMetricsSourceImpl;
-import org.apache.hadoop.metrics2.MetricsBuilder;
-import org.apache.hadoop.metrics2.MetricsRecordBuilder;
-import org.apache.hadoop.metrics2.lib.MetricMutableCounterLong;
-import org.apache.hadoop.metrics2.lib.MetricMutableGaugeLong;
-import org.apache.hadoop.metrics2.lib.MetricMutableHistogram;
-
-/** Hadoop1 implementation of MasterMetricsSource. */
-public class MasterMetricsSourceImpl
- extends BaseMetricsSourceImpl implements MasterMetricsSource {
-
- private static final Log LOG = LogFactory.getLog(MasterMetricsSourceImpl.class.getName());
-
- MetricMutableCounterLong clusterRequestsCounter;
- MetricMutableGaugeLong ritGauge;
- MetricMutableGaugeLong ritCountOverThresholdGauge;
- MetricMutableGaugeLong ritOldestAgeGauge;
-
- private final MasterMetricsWrapper masterWrapper;
- private MetricMutableHistogram splitTimeHisto;
- private MetricMutableHistogram splitSizeHisto;
-
- public MasterMetricsSourceImpl(MasterMetricsWrapper masterWrapper) {
- this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT, masterWrapper);
- }
-
- public MasterMetricsSourceImpl(String metricsName,
- String metricsDescription,
- String metricsContext,
- String metricsJmxContext,
- MasterMetricsWrapper masterWrapper) {
- super(metricsName, metricsDescription, metricsContext, metricsJmxContext);
- this.masterWrapper = masterWrapper;
- }
-
- @Override
- public void init() {
- super.init();
- clusterRequestsCounter = metricsRegistry.newCounter(CLUSTER_REQUESTS_NAME, "", 0l);
- ritGauge = metricsRegistry.newGauge(RIT_COUNT_NAME, "", 0l);
- ritCountOverThresholdGauge = metricsRegistry.newGauge(RIT_COUNT_OVER_THRESHOLD_NAME, "", 0l);
- ritOldestAgeGauge = metricsRegistry.newGauge(RIT_OLDEST_AGE_NAME, "", 0l);
- splitTimeHisto = metricsRegistry.newHistogram(SPLIT_SIZE_NAME, SPLIT_SIZE_DESC);
- splitSizeHisto = metricsRegistry.newHistogram(SPLIT_TIME_NAME, SPLIT_TIME_DESC);
- }
-
- public void incRequests(final int inc) {
- this.clusterRequestsCounter.incr(inc);
- }
-
- public void setRIT(int ritCount) {
- ritGauge.set(ritCount);
- }
-
- public void setRITCountOverThreshold(int ritCount) {
- ritCountOverThresholdGauge.set(ritCount);
- }
-
- public void setRITOldestAge(long ritCount) {
- ritOldestAgeGauge.set(ritCount);
- }
-
- @Override
- public void updateSplitTime(long time) {
- splitTimeHisto.add(time);
- }
-
- @Override
- public void updateSplitSize(long size) {
- splitSizeHisto.add(size);
- }
-
- /**
- * Method to export all the metrics.
- *
- * @param metricsBuilder Builder to accept metrics
- * @param all push all or only changed?
- */
- @Override
- public void getMetrics(MetricsBuilder metricsBuilder, boolean all) {
-
- MetricsRecordBuilder metricsRecordBuilder = metricsBuilder.addRecord(metricsName)
- .setContext(metricsContext);
-
- // masterWrapper can be null because this function is called inside of init.
- if (masterWrapper != null) {
- metricsRecordBuilder
- .addGauge(MASTER_ACTIVE_TIME_NAME,
- MASTER_ACTIVE_TIME_DESC, masterWrapper.getMasterActiveTime())
- .addGauge(MASTER_START_TIME_NAME,
- MASTER_START_TIME_DESC, masterWrapper.getMasterStartTime())
- .addGauge(AVERAGE_LOAD_NAME, AVERAGE_LOAD_DESC, masterWrapper.getAverageLoad())
- .addGauge(NUM_REGION_SERVERS_NAME,
- NUMBER_OF_REGION_SERVERS_DESC, masterWrapper.getRegionServers())
- .addGauge(NUM_DEAD_REGION_SERVERS_NAME,
- NUMBER_OF_DEAD_REGION_SERVERS_DESC,
- masterWrapper.getDeadRegionServers())
- .tag(ZOOKEEPER_QUORUM_NAME, ZOOKEEPER_QUORUM_DESC, masterWrapper.getZookeeperQuorum())
- .tag(SERVER_NAME_NAME, SERVER_NAME_DESC, masterWrapper.getServerName())
- .tag(CLUSTER_ID_NAME, CLUSTER_ID_DESC, masterWrapper.getClusterId())
- .tag(IS_ACTIVE_MASTER_NAME,
- IS_ACTIVE_MASTER_DESC,
- String.valueOf(masterWrapper.getIsActiveMaster()));
- }
-
- metricsRegistry.snapshot(metricsRecordBuilder, true);
- }
-
-}
diff --git hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseMetricsSourceImpl.java hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseMetricsSourceImpl.java
deleted file mode 100644
index 0943370..0000000
--- hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseMetricsSourceImpl.java
+++ /dev/null
@@ -1,175 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.metrics;
-
-import org.apache.hadoop.metrics2.MetricsBuilder;
-import org.apache.hadoop.metrics2.MetricsSource;
-import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
-import org.apache.hadoop.metrics2.lib.DynamicMetricsRegistry;
-import org.apache.hadoop.metrics2.lib.MetricMutableCounterLong;
-import org.apache.hadoop.metrics2.lib.MetricMutableGaugeLong;
-import org.apache.hadoop.metrics2.lib.MetricMutableHistogram;
-import org.apache.hadoop.metrics2.lib.MetricMutableQuantiles;
-import org.apache.hadoop.metrics2.source.JvmMetricsSource;
-
-/**
- * Hadoop 1 implementation of BaseMetricsSource (using metrics2 framework)
- */
-public class BaseMetricsSourceImpl implements BaseMetricsSource, MetricsSource {
-
- private static enum DefaultMetricsSystemInitializer {
- INSTANCE;
- private boolean inited = false;
- private JvmMetricsSource jvmMetricsSource;
-
- synchronized void init(String name) {
- if (inited) return;
- inited = true;
- DefaultMetricsSystem.initialize(HBASE_METRICS_SYSTEM_NAME);
- jvmMetricsSource = JvmMetricsSource.create(name, "");
-
- }
- }
- private static boolean defaultMetricsSystemInited = false;
- public static final String HBASE_METRICS_SYSTEM_NAME = "hbase";
-
- protected final DynamicMetricsRegistry metricsRegistry;
- protected final String metricsName;
- protected final String metricsDescription;
- protected final String metricsContext;
- protected final String metricsJmxContext;
-
- public BaseMetricsSourceImpl(
- String metricsName,
- String metricsDescription,
- String metricsContext,
- String metricsJmxContext) {
-
- this.metricsName = metricsName;
- this.metricsDescription = metricsDescription;
- this.metricsContext = metricsContext;
- this.metricsJmxContext = metricsJmxContext;
-
- metricsRegistry = new DynamicMetricsRegistry(metricsName).setContext(metricsContext);
- DefaultMetricsSystemInitializer.INSTANCE.init(metricsName);
-
- //Register this instance.
- DefaultMetricsSystem.INSTANCE.registerSource(metricsJmxContext, metricsDescription, this);
- init();
- }
-
- public void init() {
- this.metricsRegistry.clearMetrics();
- }
-
-
- /**
- * Set a single gauge to a value.
- *
- * @param gaugeName gauge name
- * @param value the new value of the gauge.
- */
- public void setGauge(String gaugeName, long value) {
- MetricMutableGaugeLong gaugeInt = metricsRegistry.getLongGauge(gaugeName, value);
- gaugeInt.set(value);
- }
-
- /**
- * Add some amount to a gauge.
- *
- * @param gaugeName The name of the gauge to increment.
- * @param delta The amount to increment the gauge by.
- */
- public void incGauge(String gaugeName, long delta) {
- MetricMutableGaugeLong gaugeInt = metricsRegistry.getLongGauge(gaugeName, 0l);
- gaugeInt.incr(delta);
- }
-
- /**
- * Decrease the value of a named gauge.
- *
- * @param gaugeName The name of the gauge.
- * @param delta the ammount to subtract from a gauge value.
- */
- public void decGauge(String gaugeName, long delta) {
- MetricMutableGaugeLong gaugeInt = metricsRegistry.getLongGauge(gaugeName, 0l);
- gaugeInt.decr(delta);
- }
-
- /**
- * Increment a named counter by some value.
- *
- * @param key the name of the counter
- * @param delta the ammount to increment
- */
- public void incCounters(String key, long delta) {
- MetricMutableCounterLong counter = metricsRegistry.getLongCounter(key, 0l);
- counter.incr(delta);
-
- }
-
- @Override
- public void updateHistogram(String name, long value) {
- MetricMutableHistogram histo = metricsRegistry.getHistogram(name);
- histo.add(value);
- }
-
- @Override
- public void updateQuantile(String name, long value) {
- MetricMutableQuantiles histo = metricsRegistry.getQuantile(name);
- histo.add(value);
- }
-
- /**
- * Remove a named gauge.
- *
- * @param key
- */
- public void removeGauge(String key) {
- metricsRegistry.removeMetric(key);
- }
-
- /**
- * Remove a named counter.
- *
- * @param key
- */
- public void removeCounter(String key) {
- metricsRegistry.removeMetric(key);
- }
-
- /**
- * Method to export all the metrics.
- *
- * @param metricsBuilder Builder to accept metrics
- * @param all push all or only changed?
- */
- @Override
- public void getMetrics(MetricsBuilder metricsBuilder, boolean all) {
- metricsRegistry.snapshot(metricsBuilder.addRecord(metricsRegistry.name()), all);
- }
-
- /**
- * Used to get at the DynamicMetricsRegistry.
- * @return DynamicMetricsRegistry
- */
- protected DynamicMetricsRegistry getMetricsRegistry() {
- return metricsRegistry;
- }
-}
diff --git hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseSourceImpl.java hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseSourceImpl.java
new file mode 100644
index 0000000..185fa59
--- /dev/null
+++ hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseSourceImpl.java
@@ -0,0 +1,167 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.metrics;
+
+import org.apache.hadoop.metrics2.MetricsBuilder;
+import org.apache.hadoop.metrics2.MetricsRecordBuilder;
+import org.apache.hadoop.metrics2.MetricsSource;
+import org.apache.hadoop.metrics2.impl.JmxCacheBuster;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+import org.apache.hadoop.metrics2.lib.DynamicMetricsRegistry;
+import org.apache.hadoop.metrics2.lib.MetricMutableCounterLong;
+import org.apache.hadoop.metrics2.lib.MetricMutableGaugeLong;
+import org.apache.hadoop.metrics2.lib.MetricMutableHistogram;
+import org.apache.hadoop.metrics2.lib.MetricMutableQuantiles;
+import org.apache.hadoop.metrics2.source.JvmMetricsSource;
+
+/**
+ * Hadoop 1 implementation of BaseSource (using metrics2 framework)
+ */
+public class BaseSourceImpl implements BaseSource, MetricsSource {
+
+ private static enum DefaultMetricsSystemInitializer {
+ INSTANCE;
+ private boolean inited = false;
+ private JvmMetricsSource jvmMetricsSource;
+
+ synchronized void init(String name) {
+ if (inited) return;
+ inited = true;
+ DefaultMetricsSystem.initialize(HBASE_METRICS_SYSTEM_NAME);
+ jvmMetricsSource = JvmMetricsSource.create(name, "");
+
+ }
+ }
+
+ protected final DynamicMetricsRegistry metricsRegistry;
+ protected final String metricsName;
+ protected final String metricsDescription;
+ protected final String metricsContext;
+ protected final String metricsJmxContext;
+
+ public BaseSourceImpl(
+ String metricsName,
+ String metricsDescription,
+ String metricsContext,
+ String metricsJmxContext) {
+
+ this.metricsName = metricsName;
+ this.metricsDescription = metricsDescription;
+ this.metricsContext = metricsContext;
+ this.metricsJmxContext = metricsJmxContext;
+
+ metricsRegistry = new DynamicMetricsRegistry(metricsName).setContext(metricsContext);
+ DefaultMetricsSystemInitializer.INSTANCE.init(metricsName);
+
+ //Register this instance.
+ DefaultMetricsSystem.INSTANCE.registerSource(metricsJmxContext, metricsDescription, this);
+ init();
+ }
+
+ public void init() {
+ this.metricsRegistry.clearMetrics();
+ }
+
+
+ /**
+ * Set a single gauge to a value.
+ *
+ * @param gaugeName gauge name
+ * @param value the new value of the gauge.
+ */
+ public void setGauge(String gaugeName, long value) {
+ MetricMutableGaugeLong gaugeInt = metricsRegistry.getLongGauge(gaugeName, value);
+ gaugeInt.set(value);
+ }
+
+ /**
+ * Add some amount to a gauge.
+ *
+ * @param gaugeName The name of the gauge to increment.
+ * @param delta The amount to increment the gauge by.
+ */
+ public void incGauge(String gaugeName, long delta) {
+    MetricMutableGaugeLong gaugeInt = metricsRegistry.getLongGauge(gaugeName, 0L);
+ gaugeInt.incr(delta);
+ }
+
+ /**
+ * Decrease the value of a named gauge.
+ *
+ * @param gaugeName The name of the gauge.
+   * @param delta the amount to subtract from the gauge's value.
+ */
+ public void decGauge(String gaugeName, long delta) {
+    MetricMutableGaugeLong gaugeInt = metricsRegistry.getLongGauge(gaugeName, 0L);
+ gaugeInt.decr(delta);
+ }
+
+ /**
+ * Increment a named counter by some value.
+ *
+ * @param key the name of the counter
+   * @param delta the amount to increment by
+ */
+ public void incCounters(String key, long delta) {
+    MetricMutableCounterLong counter = metricsRegistry.getLongCounter(key, 0L);
+ counter.incr(delta);
+
+ }
+
+ @Override
+ public void updateHistogram(String name, long value) {
+ MetricMutableHistogram histo = metricsRegistry.getHistogram(name);
+ histo.add(value);
+ }
+
+ @Override
+ public void updateQuantile(String name, long value) {
+ MetricMutableQuantiles histo = metricsRegistry.getQuantile(name);
+ histo.add(value);
+ }
+
+ /**
+ * Remove a named metric.
+ *
+   * @param key the name of the metric to remove
+ */
+ public void removeMetric(String key) {
+ metricsRegistry.removeMetric(key);
+ JmxCacheBuster.clearJmxCache();
+ }
+
+
+ /**
+ * Method to export all the metrics.
+ *
+ * @param metricsBuilder Builder to accept metrics
+ * @param all push all or only changed?
+ */
+ @Override
+ public void getMetrics(MetricsBuilder metricsBuilder, boolean all) {
+ MetricsRecordBuilder mrb = metricsBuilder.addRecord(metricsName)
+ .setContext(metricsContext);
+ metricsRegistry.snapshot(mrb, all);
+ }
+
+ /**
+ * Used to get at the DynamicMetricsRegistry.
+ * @return DynamicMetricsRegistry
+ */
+ public DynamicMetricsRegistry getMetricsRegistry() {
+ return metricsRegistry;
+ }
+}
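
To make the contract above concrete, here is a minimal sketch of a source built on BaseSourceImpl; the class name and metric names (ExampleSource, exampleQueueDepth, exampleOpsCount) are illustrative and not part of this patch.

package org.apache.hadoop.hbase.metrics;

/** A minimal sketch of a custom source built on BaseSourceImpl. */
public class ExampleSource extends BaseSourceImpl {

  public ExampleSource() {
    // metrics name, description, metrics context, JMX context
    super("Example", "An example metrics source", "example", "Example");
  }

  public void recordOperation(long queueDepth) {
    // A gauge holds the most recent value; a counter only moves forward.
    setGauge("exampleQueueDepth", queueDepth);
    incCounters("exampleOpsCount", 1);
  }
}
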
diff --git hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MerticsRegionSourceImpl.java hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MerticsRegionSourceImpl.java
new file mode 100644
index 0000000..7c528e7
--- /dev/null
+++ hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MerticsRegionSourceImpl.java
@@ -0,0 +1,163 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.regionserver;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.metrics2.MetricsRecordBuilder;
+import org.apache.hadoop.metrics2.impl.JmxCacheBuster;
+import org.apache.hadoop.metrics2.lib.DynamicMetricsRegistry;
+import org.apache.hadoop.metrics2.lib.MetricMutableCounterLong;
+
+public class MerticsRegionSourceImpl implements MerticsRegionSource {
+
+ private final MetricsRegionWrapper regionWrapper;
+ private boolean closed = false;
+ private MetricsRegionAggregateSourceImpl agg;
+ private DynamicMetricsRegistry registry;
+ private static final Log LOG = LogFactory.getLog(MerticsRegionSourceImpl.class);
+
+ private String regionNamePrefix;
+ private String regionPutKey;
+ private String regionDeleteKey;
+ private String regionGetKey;
+ private String regionIncrementKey;
+ private String regionAppendKey;
+ private MetricMutableCounterLong regionPut;
+ private MetricMutableCounterLong regionDelete;
+ private MetricMutableCounterLong regionGet;
+ private MetricMutableCounterLong regionIncrement;
+ private MetricMutableCounterLong regionAppend;
+
+ public MerticsRegionSourceImpl(MetricsRegionWrapper regionWrapper,
+ MetricsRegionAggregateSourceImpl aggregate) {
+ this.regionWrapper = regionWrapper;
+ agg = aggregate;
+ agg.register(this);
+
+ LOG.debug("Creating new MerticsRegionSourceImpl for table " +
+ regionWrapper.getTableName() +
+ " " +
+ regionWrapper.getRegionName());
+
+ registry = agg.getMetricsRegistry();
+
+ regionNamePrefix = "table." + regionWrapper.getTableName() + "."
+ + "region." + regionWrapper.getRegionName() + ".";
+
+ String suffix = "Count";
+
+
+    regionPutKey = regionNamePrefix + MetricsRegionServerSource.PUT_KEY + suffix;
+    regionPut = registry.getLongCounter(regionPutKey, 0L);
+
+    regionDeleteKey = regionNamePrefix + MetricsRegionServerSource.DELETE_KEY + suffix;
+    regionDelete = registry.getLongCounter(regionDeleteKey, 0L);
+
+    regionGetKey = regionNamePrefix + MetricsRegionServerSource.GET_KEY + suffix;
+    regionGet = registry.getLongCounter(regionGetKey, 0L);
+
+    regionIncrementKey = regionNamePrefix + MetricsRegionServerSource.INCREMENT_KEY + suffix;
+    regionIncrement = registry.getLongCounter(regionIncrementKey, 0L);
+
+    regionAppendKey = regionNamePrefix + MetricsRegionServerSource.APPEND_KEY + suffix;
+    regionAppend = registry.getLongCounter(regionAppendKey, 0L);
+ }
+
+ @Override
+ public void close() {
+ closed = true;
+ agg.deregister(this);
+
+ LOG.trace("Removing region Metrics: " + regionWrapper.getRegionName());
+ registry.removeMetric(regionPutKey);
+ registry.removeMetric(regionDeleteKey);
+ registry.removeMetric(regionGetKey);
+ registry.removeMetric(regionIncrementKey);
+
+ registry.removeMetric(regionAppendKey);
+
+ JmxCacheBuster.clearJmxCache();
+ }
+
+ @Override
+ public void updatePut() {
+ regionPut.incr();
+ }
+
+ @Override
+ public void updateDelete() {
+ regionDelete.incr();
+ }
+
+ @Override
+ public void updateGet() {
+ regionGet.incr();
+ }
+
+ @Override
+ public void updateIncrement() {
+ regionIncrement.incr();
+ }
+
+ @Override
+ public void updateAppend() {
+ regionAppend.incr();
+ }
+
+ @Override
+ public MetricsRegionAggregateSource getAggregateSource() {
+ return agg;
+ }
+
+ @Override
+ public int compareTo(MerticsRegionSource merticsRegionSource) {
+
+ if (!(merticsRegionSource instanceof MerticsRegionSourceImpl))
+ return -1;
+
+ MerticsRegionSourceImpl impl = (MerticsRegionSourceImpl) merticsRegionSource;
+ return this.regionWrapper.getRegionName()
+ .compareTo(impl.regionWrapper.getRegionName());
+ }
+
+ void snapshot(MetricsRecordBuilder mrb, boolean ignored) {
+ if (closed) return;
+
+ mrb.addGauge(regionNamePrefix + MetricsRegionServerSource.STORE_COUNT,
+ MetricsRegionServerSource.STORE_COUNT_DESC,
+ this.regionWrapper.getNumStores());
+ mrb.addGauge(regionNamePrefix + MetricsRegionServerSource.STOREFILE_COUNT,
+ MetricsRegionServerSource.STOREFILE_COUNT_DESC,
+ this.regionWrapper.getNumStoreFiles());
+ mrb.addGauge(regionNamePrefix + MetricsRegionServerSource.MEMSTORE_SIZE,
+ MetricsRegionServerSource.MEMSTORE_SIZE_DESC,
+ this.regionWrapper.getMemstoreSize());
+ mrb.addGauge(regionNamePrefix + MetricsRegionServerSource.STOREFILE_SIZE,
+ MetricsRegionServerSource.STOREFILE_SIZE_DESC,
+ this.regionWrapper.getStoreFileSize());
+ mrb.addCounter(regionNamePrefix + MetricsRegionServerSource.READ_REQUEST_COUNT,
+ MetricsRegionServerSource.READ_REQUEST_COUNT_DESC,
+ this.regionWrapper.getReadRequestCount());
+ mrb.addCounter(regionNamePrefix + MetricsRegionServerSource.WRITE_REQUEST_COUNT,
+ MetricsRegionServerSource.WRITE_REQUEST_COUNT_DESC,
+ this.regionWrapper.getWriteRequestCount());
+
+ }
+}
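
The per-region counters registered above live in the aggregate source's shared registry under dynamically composed keys. A small sketch of the naming scheme; the wrapper values are made up, and the assumption that MetricsRegionServerSource.PUT_KEY resolves to "put" is illustrative only.

public class RegionKeySketch {
  public static void main(String[] args) {
    // Values a MetricsRegionWrapper would supply at runtime (hypothetical).
    String tableName = "TestTable";
    String regionName = "1234abcd";
    String prefix = "table." + tableName + ".region." + regionName + ".";
    // With PUT_KEY assumed to be "put", the put counter is registered as
    // "table.TestTable.region.1234abcd.putCount".
    System.out.println(prefix + "put" + "Count");
  }
}
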
diff --git hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionAggregateSourceImpl.java hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionAggregateSourceImpl.java
new file mode 100644
index 0000000..77d5544
--- /dev/null
+++ hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionAggregateSourceImpl.java
@@ -0,0 +1,82 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.regionserver;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.metrics.BaseSourceImpl;
+import org.apache.hadoop.metrics2.MetricsBuilder;
+import org.apache.hadoop.metrics2.MetricsRecordBuilder;
+
+import java.util.TreeSet;
+
+public class MetricsRegionAggregateSourceImpl extends BaseSourceImpl
+ implements MetricsRegionAggregateSource {
+ private final Log LOG = LogFactory.getLog(this.getClass());
+
+  private final TreeSet<MerticsRegionSourceImpl> regionSources =
+      new TreeSet<MerticsRegionSourceImpl>();
+
+ public MetricsRegionAggregateSourceImpl() {
+ this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT);
+ }
+
+
+ public MetricsRegionAggregateSourceImpl(String metricsName,
+ String metricsDescription,
+ String metricsContext,
+ String metricsJmxContext) {
+ super(metricsName, metricsDescription, metricsContext, metricsJmxContext);
+ }
+
+ @Override
+ public void register(MerticsRegionSource merticsRegionSource) {
+ regionSources.add((MerticsRegionSourceImpl) merticsRegionSource);
+ }
+
+ @Override
+ public void deregister(MerticsRegionSource merticsRegionSource) {
+ regionSources.remove(merticsRegionSource);
+ }
+
+ /**
+   * Yes, this is a get function that doesn't return anything. Thanks, Hadoop, for breaking all
+   * expectations of Java programmers. Instead of returning anything, Hadoop metrics expects
+   * getMetrics to push the metrics into the metricsBuilder.
+ *
+ * @param metricsBuilder Builder to accept metrics
+ * @param all push all or only changed?
+ */
+ @Override
+ public void getMetrics(MetricsBuilder metricsBuilder, boolean all) {
+
+
+ MetricsRecordBuilder mrb = metricsBuilder.addRecord(metricsName)
+ .setContext(metricsContext);
+
+ if (regionSources != null) {
+ for (MerticsRegionSourceImpl regionMetricSource : regionSources) {
+ regionMetricSource.snapshot(mrb, all);
+ }
+ }
+
+
+ metricsRegistry.snapshot(mrb, all);
+ }
+}
diff --git hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceFactoryImpl.java hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceFactoryImpl.java
new file mode 100644
index 0000000..c88118f
--- /dev/null
+++ hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceFactoryImpl.java
@@ -0,0 +1,52 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.regionserver;
+
+/**
+ * Factory to create MetricsRegionServerSource when given a MetricsRegionServerWrapper
+ */
+public class MetricsRegionServerSourceFactoryImpl implements MetricsRegionServerSourceFactory {
+ private static enum FactoryStorage {
+ INSTANCE;
+ private MetricsRegionServerSource generalSource;
+ private MetricsRegionAggregateSourceImpl aggImpl;
+ }
+
+ private synchronized MetricsRegionAggregateSourceImpl getAggregate() {
+ if (FactoryStorage.INSTANCE.aggImpl == null) {
+ FactoryStorage.INSTANCE.aggImpl = new MetricsRegionAggregateSourceImpl();
+ }
+ return FactoryStorage.INSTANCE.aggImpl;
+ }
+
+
+ @Override
+ public synchronized MetricsRegionServerSource createGeneral(MetricsRegionServerWrapper regionServerWrapper) {
+ if (FactoryStorage.INSTANCE.generalSource == null) {
+ FactoryStorage.INSTANCE.generalSource = new MetricsRegionServerSourceImpl(
+ regionServerWrapper);
+ }
+ return FactoryStorage.INSTANCE.generalSource;
+ }
+
+ @Override
+ public MerticsRegionSource createRegion(MetricsRegionWrapper wrapper) {
+ return new MerticsRegionSourceImpl(wrapper, getAggregate());
+ }
+}
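
Callers are not meant to instantiate this factory directly; as the tests later in this patch show, it is resolved through CompatibilitySingletonFactory. A sketch of the expected lookup, assuming the wrapper is whatever the region server supplies when a region opens:

package org.apache.hadoop.hbase.regionserver;

import org.apache.hadoop.hbase.CompatibilitySingletonFactory;

public class RegionSourceLookupSketch {
  static MerticsRegionSource openRegionSource(MetricsRegionWrapper regionWrapper) {
    // Returns the single factory instance registered for this compat layer.
    MetricsRegionServerSourceFactory factory =
        CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class);
    return factory.createRegion(regionWrapper);
  }
}
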
diff --git hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java
new file mode 100644
index 0000000..df7dc6d
--- /dev/null
+++ hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java
@@ -0,0 +1,161 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.regionserver;
+
+import org.apache.hadoop.hbase.metrics.BaseSourceImpl;
+import org.apache.hadoop.metrics2.MetricHistogram;
+import org.apache.hadoop.metrics2.MetricsBuilder;
+import org.apache.hadoop.metrics2.MetricsRecordBuilder;
+
+/**
+ * Hadoop1 implementation of MetricsRegionServerSource.
+ */
+public class MetricsRegionServerSourceImpl
+ extends BaseSourceImpl implements MetricsRegionServerSource {
+
+ final MetricsRegionServerWrapper rsWrap;
+ private final MetricHistogram putHisto;
+ private final MetricHistogram deleteHisto;
+ private final MetricHistogram getHisto;
+ private final MetricHistogram incrementHisto;
+ private final MetricHistogram appendHisto;
+
+ public MetricsRegionServerSourceImpl(MetricsRegionServerWrapper rsWrap) {
+ this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT, rsWrap);
+ }
+
+ public MetricsRegionServerSourceImpl(String metricsName,
+ String metricsDescription,
+ String metricsContext,
+ String metricsJmxContext,
+ MetricsRegionServerWrapper rsWrap) {
+ super(metricsName, metricsDescription, metricsContext, metricsJmxContext);
+ this.rsWrap = rsWrap;
+
+ putHisto = getMetricsRegistry().getHistogram(PUT_KEY);
+ deleteHisto = getMetricsRegistry().getHistogram(DELETE_KEY);
+ getHisto = getMetricsRegistry().getHistogram(GET_KEY);
+ incrementHisto = getMetricsRegistry().getHistogram(INCREMENT_KEY);
+ appendHisto = getMetricsRegistry().getHistogram(APPEND_KEY);
+ }
+
+ @Override
+ public void init() {
+ super.init();
+ }
+
+ @Override
+ public void updatePut(long t) {
+ putHisto.add(t);
+ }
+
+ @Override
+ public void updateDelete(long t) {
+ deleteHisto.add(t);
+ }
+
+ @Override
+ public void updateGet(long t) {
+ getHisto.add(t);
+ }
+
+ @Override
+ public void updateIncrement(long t) {
+ incrementHisto.add(t);
+ }
+
+ @Override
+ public void updateAppend(long t) {
+ appendHisto.add(t);
+ }
+
+ /**
+   * Yes, this is a get function that doesn't return anything. Thanks, Hadoop, for breaking all
+   * expectations of Java programmers. Instead of returning anything, Hadoop metrics expects
+   * getMetrics to push the metrics into the metricsBuilder.
+ *
+ * @param metricsBuilder Builder to accept metrics
+ * @param all push all or only changed?
+ */
+ @Override
+ public void getMetrics(MetricsBuilder metricsBuilder, boolean all) {
+
+ MetricsRecordBuilder mrb = metricsBuilder.addRecord(metricsName)
+ .setContext(metricsContext);
+
+    // rsWrap can be null because this method is called from inside init().
+ if (rsWrap != null) {
+ mrb.addGauge(REGION_COUNT, REGION_COUNT_DESC, rsWrap.getNumOnlineRegions())
+ .addGauge(STORE_COUNT, STORE_COUNT_DESC, rsWrap.getNumStores())
+ .addGauge(STOREFILE_COUNT, STOREFILE_COUNT_DESC, rsWrap.getNumStoreFiles())
+ .addGauge(MEMSTORE_SIZE, MEMSTORE_SIZE_DESC, rsWrap.getMemstoreSize())
+ .addGauge(STOREFILE_SIZE, STOREFILE_SIZE_DESC, rsWrap.getStoreFileSize())
+ .addGauge(RS_START_TIME_NAME, RS_START_TIME_DESC, rsWrap.getRegionServerStartTime())
+ .addCounter(TOTAL_REQUEST_COUNT, TOTAL_REQUEST_COUNT_DESC, rsWrap.getTotalRequestCount())
+ .addCounter(READ_REQUEST_COUNT, READ_REQUEST_COUNT_DESC, rsWrap.getReadRequestsCount())
+ .addCounter(WRITE_REQUEST_COUNT, WRITE_REQUEST_COUNT_DESC, rsWrap.getWriteRequestsCount())
+ .addCounter(CHECK_MUTATE_FAILED_COUNT,
+ CHECK_MUTATE_FAILED_COUNT_DESC,
+ rsWrap.getCheckAndMutateChecksFailed())
+ .addCounter(CHECK_MUTATE_PASSED_COUNT,
+ CHECK_MUTATE_PASSED_COUNT_DESC,
+ rsWrap.getCheckAndMutateChecksPassed())
+ .addGauge(STOREFILE_INDEX_SIZE, STOREFILE_INDEX_SIZE_DESC, rsWrap.getStoreFileIndexSize())
+ .addGauge(STATIC_INDEX_SIZE, STATIC_INDEX_SIZE_DESC, rsWrap.getTotalStaticIndexSize())
+ .addGauge(STATIC_BLOOM_SIZE, STATIC_BLOOM_SIZE_DESC, rsWrap.getTotalStaticBloomSize())
+ .addCounter(NUMBER_OF_PUTS_WITHOUT_WAL,
+ NUMBER_OF_PUTS_WITHOUT_WAL_DESC,
+ rsWrap.getNumPutsWithoutWAL())
+ .addGauge(DATA_SIZE_WITHOUT_WAL,
+ DATA_SIZE_WITHOUT_WAL_DESC,
+ rsWrap.getDataInMemoryWithoutWAL())
+ .addGauge(PERCENT_FILES_LOCAL, PERCENT_FILES_LOCAL_DESC, rsWrap.getPercentFileLocal())
+ .addGauge(COMPACTION_QUEUE_LENGTH,
+ COMPACTION_QUEUE_LENGTH_DESC,
+ rsWrap.getCompactionQueueSize())
+ .addGauge(FLUSH_QUEUE_LENGTH, FLUSH_QUEUE_LENGTH_DESC, rsWrap.getFlushQueueSize())
+ .addGauge(BLOCK_CACHE_FREE_SIZE, BLOCK_CACHE_FREE_DESC, rsWrap.getBlockCacheFreeSize())
+ .addGauge(BLOCK_CACHE_COUNT, BLOCK_CACHE_COUNT_DESC, rsWrap.getBlockCacheCount())
+ .addGauge(BLOCK_CACHE_SIZE, BLOCK_CACHE_SIZE_DESC, rsWrap.getBlockCacheSize())
+ .addCounter(BLOCK_CACHE_HIT_COUNT,
+ BLOCK_CACHE_HIT_COUNT_DESC,
+ rsWrap.getBlockCacheHitCount())
+ .addCounter(BLOCK_CACHE_MISS_COUNT,
+ BLOCK_COUNT_MISS_COUNT_DESC,
+ rsWrap.getBlockCacheMissCount())
+ .addCounter(BLOCK_CACHE_EVICTION_COUNT,
+ BLOCK_CACHE_EVICTION_COUNT_DESC,
+ rsWrap.getBlockCacheEvictedCount())
+ .addGauge(BLOCK_CACHE_HIT_PERCENT,
+ BLOCK_CACHE_HIT_PERCENT_DESC,
+ rsWrap.getBlockCacheHitPercent())
+ .addGauge(BLOCK_CACHE_EXPRESS_HIT_PERCENT,
+ BLOCK_CACHE_EXPRESS_HIT_PERCENT_DESC,
+ rsWrap.getBlockCacheHitCachingPercent())
+ .addCounter(UPDATES_BLOCKED_TIME, UPDATES_BLOCKED_DESC, rsWrap.getUpdatesBlockedTime())
+ .tag(ZOOKEEPER_QUORUM_NAME, ZOOKEEPER_QUORUM_DESC, rsWrap.getZookeeperQuorum())
+ .tag(SERVER_NAME_NAME, SERVER_NAME_DESC, rsWrap.getServerName())
+ .tag(CLUSTER_ID_NAME, CLUSTER_ID_DESC, rsWrap.getClusterId());
+ }
+
+ metricsRegistry.snapshot(mrb, all);
+ }
+
+
+}
diff --git hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceImpl.java hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceImpl.java
new file mode 100644
index 0000000..d8da3b3
--- /dev/null
+++ hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceImpl.java
@@ -0,0 +1,40 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.replication.regionserver;
+
+import org.apache.hadoop.hbase.metrics.BaseSourceImpl;
+
+/**
+ * Hadoop1 implementation of MetricsReplicationSource. This provides access to metrics gauges and
+ * counters.
+ */
+public class MetricsReplicationSourceImpl extends BaseSourceImpl implements
+ MetricsReplicationSource {
+
+ public MetricsReplicationSourceImpl() {
+ this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT);
+ }
+
+ MetricsReplicationSourceImpl(String metricsName,
+ String metricsDescription,
+ String metricsContext,
+ String metricsJmxContext) {
+ super(metricsName, metricsDescription, metricsContext, metricsJmxContext);
+ }
+}
diff --git hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/metrics/ReplicationMetricsSourceImpl.java hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/metrics/ReplicationMetricsSourceImpl.java
deleted file mode 100644
index 0cb8cf9..0000000
--- hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/metrics/ReplicationMetricsSourceImpl.java
+++ /dev/null
@@ -1,40 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.replication.regionserver.metrics;
-
-import org.apache.hadoop.hbase.metrics.BaseMetricsSourceImpl;
-
-/**
- * Hadoop1 implementation of ReplicationMetricsSource. This provides access to metrics gauges and
- * counters.
- */
-public class ReplicationMetricsSourceImpl extends BaseMetricsSourceImpl implements
- ReplicationMetricsSource {
-
- public ReplicationMetricsSourceImpl() {
- this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT);
- }
-
- ReplicationMetricsSourceImpl(String metricsName,
- String metricsDescription,
- String metricsContext,
- String metricsJmxContext) {
- super(metricsName, metricsDescription, metricsContext, metricsJmxContext);
- }
-}
diff --git hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/rest/MetricsRESTSourceImpl.java hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/rest/MetricsRESTSourceImpl.java
new file mode 100644
index 0000000..c63aa05
--- /dev/null
+++ hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/rest/MetricsRESTSourceImpl.java
@@ -0,0 +1,97 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest;
+
+import org.apache.hadoop.hbase.metrics.BaseSourceImpl;
+import org.apache.hadoop.metrics2.lib.MetricMutableCounterLong;
+
+/**
+ * Hadoop 1 implementation of a metrics2 source that will export metrics from the REST server to
+ * the hadoop metrics2 subsystem.
+ */
+public class MetricsRESTSourceImpl extends BaseSourceImpl implements MetricsRESTSource {
+
+ private MetricMutableCounterLong request;
+ private MetricMutableCounterLong sucGet;
+ private MetricMutableCounterLong sucPut;
+ private MetricMutableCounterLong sucDel;
+ private MetricMutableCounterLong fGet;
+ private MetricMutableCounterLong fPut;
+ private MetricMutableCounterLong fDel;
+
+ public MetricsRESTSourceImpl() {
+ this(METRICS_NAME, METRICS_DESCRIPTION, CONTEXT, JMX_CONTEXT);
+ }
+
+ public MetricsRESTSourceImpl(String metricsName,
+ String metricsDescription,
+ String metricsContext,
+ String metricsJmxContext) {
+ super(metricsName, metricsDescription, metricsContext, metricsJmxContext);
+ }
+
+ @Override
+ public void init() {
+ super.init();
+    request = getMetricsRegistry().getLongCounter(REQUEST_KEY, 0L);
+
+    sucGet = getMetricsRegistry().getLongCounter(SUCCESSFUL_GET_KEY, 0L);
+    sucPut = getMetricsRegistry().getLongCounter(SUCCESSFUL_PUT_KEY, 0L);
+    sucDel = getMetricsRegistry().getLongCounter(SUCCESSFUL_DELETE_KEY, 0L);
+
+    fGet = getMetricsRegistry().getLongCounter(FAILED_GET_KEY, 0L);
+    fPut = getMetricsRegistry().getLongCounter(FAILED_PUT_KEY, 0L);
+    fDel = getMetricsRegistry().getLongCounter(FAILED_DELETE_KEY, 0L);
+ }
+
+ @Override
+ public void incrementRequests(int inc) {
+ request.incr(inc);
+ }
+
+ @Override
+ public void incrementSucessfulGetRequests(int inc) {
+ sucGet.incr(inc);
+ }
+
+ @Override
+ public void incrementSucessfulPutRequests(int inc) {
+ sucPut.incr(inc);
+ }
+
+ @Override
+ public void incrementSucessfulDeleteRequests(int inc) {
+ sucDel.incr(inc);
+ }
+
+ @Override
+ public void incrementFailedGetRequests(int inc) {
+ fGet.incr(inc);
+ }
+
+ @Override
+ public void incrementFailedPutRequests(int inc) {
+ fPut.incr(inc);
+ }
+
+ @Override
+ public void incrementFailedDeleteRequests(int inc) {
+ fDel.incr(inc);
+ }
+}
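
The REST server is expected to drive these counters through the MetricsRESTSource interface, which the ServiceLoader entry added later in this patch binds to this implementation. A sketch of the call pattern:

package org.apache.hadoop.hbase.rest;

import org.apache.hadoop.hbase.CompatibilitySingletonFactory;

public class RestMetricsUsageSketch {
  public static void main(String[] args) {
    MetricsRESTSource restMetrics =
        CompatibilitySingletonFactory.getInstance(MetricsRESTSource.class);
    restMetrics.incrementRequests(1);
    // Method spelling ("Sucessful") matches the interface as defined.
    restMetrics.incrementSucessfulGetRequests(1);
  }
}
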
diff --git hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/rest/metrics/RESTMetricsSourceImpl.java hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/rest/metrics/RESTMetricsSourceImpl.java
deleted file mode 100644
index eff11dc..0000000
--- hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/rest/metrics/RESTMetricsSourceImpl.java
+++ /dev/null
@@ -1,97 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.rest.metrics;
-
-import org.apache.hadoop.hbase.metrics.BaseMetricsSourceImpl;
-import org.apache.hadoop.metrics2.lib.MetricMutableCounterLong;
-
-/**
- * Hadoop One implementation of a metrics2 source that will export metrics from the Rest server to
- * the hadoop metrics2 subsystem.
- */
-public class RESTMetricsSourceImpl extends BaseMetricsSourceImpl implements RESTMetricsSource {
-
- private MetricMutableCounterLong request;
- private MetricMutableCounterLong sucGet;
- private MetricMutableCounterLong sucPut;
- private MetricMutableCounterLong sucDel;
- private MetricMutableCounterLong fGet;
- private MetricMutableCounterLong fPut;
- private MetricMutableCounterLong fDel;
-
- public RESTMetricsSourceImpl() {
- this(METRICS_NAME, METRICS_DESCRIPTION, CONTEXT, JMX_CONTEXT);
- }
-
- public RESTMetricsSourceImpl(String metricsName,
- String metricsDescription,
- String metricsContext,
- String metricsJmxContext) {
- super(metricsName, metricsDescription, metricsContext, metricsJmxContext);
- }
-
- @Override
- public void init() {
- super.init();
- request = getMetricsRegistry().getLongCounter(REQUEST_KEY, 0l);
-
- sucGet = getMetricsRegistry().getLongCounter(SUCCESSFUL_GET_KEY, 0l);
- sucPut = getMetricsRegistry().getLongCounter(SUCCESSFUL_PUT_KEY, 0l);
- sucDel = getMetricsRegistry().getLongCounter(SUCCESSFUL_DELETE_KEY, 0l);
-
- fGet = getMetricsRegistry().getLongCounter(FAILED_GET_KEY, 0l);
- fPut = getMetricsRegistry().getLongCounter(FAILED_PUT_KEY, 0l);
- fDel = getMetricsRegistry().getLongCounter(FAILED_DELETE_KEY, 0l);
- }
-
- @Override
- public void incrementRequests(int inc) {
- request.incr(inc);
- }
-
- @Override
- public void incrementSucessfulGetRequests(int inc) {
- sucGet.incr(inc);
- }
-
- @Override
- public void incrementSucessfulPutRequests(int inc) {
- sucPut.incr(inc);
- }
-
- @Override
- public void incrementSucessfulDeleteRequests(int inc) {
- sucDel.incr(inc);
- }
-
- @Override
- public void incrementFailedGetRequests(int inc) {
- fGet.incr(inc);
- }
-
- @Override
- public void incrementFailedPutRequests(int inc) {
- fPut.incr(inc);
- }
-
- @Override
- public void incrementFailedDeleteRequests(int inc) {
- fDel.incr(inc);
- }
-}
diff --git hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceFactoryImpl.java hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceFactoryImpl.java
new file mode 100644
index 0000000..8762d65
--- /dev/null
+++ hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceFactoryImpl.java
@@ -0,0 +1,52 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.thrift;
+
+/**
+ * Class used to create metrics sources for Thrift and Thrift2 servers in hadoop 1's compat
+ * library.
+ */
+public class MetricsThriftServerSourceFactoryImpl implements MetricsThriftServerSourceFactory {
+
+ /**
+ * A singleton used to make sure that only one thrift metrics source per server type is ever
+ * created.
+ */
+ private static enum FactoryStorage {
+ INSTANCE;
+ MetricsThriftServerSourceImpl thriftOne = new MetricsThriftServerSourceImpl(METRICS_NAME,
+ METRICS_DESCRIPTION,
+ THRIFT_ONE_METRICS_CONTEXT,
+ THRIFT_ONE_JMX_CONTEXT);
+ MetricsThriftServerSourceImpl thriftTwo = new MetricsThriftServerSourceImpl(METRICS_NAME,
+ METRICS_DESCRIPTION,
+ THRIFT_TWO_METRICS_CONTEXT,
+ THRIFT_TWO_JMX_CONTEXT);
+ }
+
+ @Override
+ public MetricsThriftServerSource createThriftOneSource() {
+ return FactoryStorage.INSTANCE.thriftOne;
+ }
+
+ @Override
+ public MetricsThriftServerSource createThriftTwoSource() {
+ return FactoryStorage.INSTANCE.thriftTwo;
+ }
+}
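
The enum holder above is the standard Java idiom for a thread-safe, lazily initialized singleton: the JVM guarantees that FactoryStorage.INSTANCE and its fields are constructed exactly once, no matter how many factory objects exist. A sketch of the resulting behavior:

package org.apache.hadoop.hbase.thrift;

public class ThriftFactorySketch {
  public static void main(String[] args) {
    MetricsThriftServerSource a =
        new MetricsThriftServerSourceFactoryImpl().createThriftOneSource();
    MetricsThriftServerSource b =
        new MetricsThriftServerSourceFactoryImpl().createThriftOneSource();
    // Distinct factories, same underlying source: prints true.
    System.out.println(a == b);
  }
}
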
diff --git hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceImpl.java hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceImpl.java
new file mode 100644
index 0000000..6d57186
--- /dev/null
+++ hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceImpl.java
@@ -0,0 +1,96 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.thrift;
+
+import org.apache.hadoop.hbase.metrics.BaseSourceImpl;
+import org.apache.hadoop.metrics2.lib.MetricMutableGaugeLong;
+import org.apache.hadoop.metrics2.lib.MetricMutableStat;
+
+/**
+ * Hadoop 1 version of {@link MetricsThriftServerSource}.
+ */
+public class MetricsThriftServerSourceImpl extends BaseSourceImpl implements
+ MetricsThriftServerSource {
+
+
+ private MetricMutableStat batchGetStat;
+ private MetricMutableStat batchMutateStat;
+ private MetricMutableStat queueTimeStat;
+
+ private MetricMutableStat thriftCallStat;
+ private MetricMutableStat thriftSlowCallStat;
+
+ private MetricMutableGaugeLong callQueueLenGauge;
+
+ public MetricsThriftServerSourceImpl(String metricsName,
+ String metricsDescription,
+ String metricsContext,
+ String metricsJmxContext) {
+ super(metricsName, metricsDescription, metricsContext, metricsJmxContext);
+ }
+
+
+ @Override
+ public void init() {
+ super.init();
+ batchGetStat = getMetricsRegistry().newStat(BATCH_GET_KEY, "", "Keys", "Ops");
+ batchMutateStat = getMetricsRegistry().newStat(BATCH_MUTATE_KEY, "", "Keys", "Ops");
+ queueTimeStat = getMetricsRegistry().newStat(TIME_IN_QUEUE_KEY);
+ thriftCallStat = getMetricsRegistry().newStat(THRIFT_CALL_KEY);
+ thriftSlowCallStat = getMetricsRegistry().newStat(SLOW_THRIFT_CALL_KEY);
+ callQueueLenGauge = getMetricsRegistry().getLongGauge(CALL_QUEUE_LEN_KEY, 0);
+ }
+
+ @Override
+ public void incTimeInQueue(long time) {
+ queueTimeStat.add(time);
+ }
+
+ @Override
+ public void setCallQueueLen(int len) {
+ callQueueLenGauge.set(len);
+ }
+
+ @Override
+ public void incNumRowKeysInBatchGet(int diff) {
+ batchGetStat.add(diff);
+ }
+
+ @Override
+ public void incNumRowKeysInBatchMutate(int diff) {
+ batchMutateStat.add(diff);
+ }
+
+ @Override
+ public void incMethodTime(String name, long time) {
+ MetricMutableStat s = getMetricsRegistry().newStat(name);
+ s.add(time);
+ }
+
+ @Override
+ public void incCall(long time) {
+ thriftCallStat.add(time);
+ }
+
+ @Override
+ public void incSlowCall(long time) {
+ thriftSlowCallStat.add(time);
+ }
+
+}
diff --git hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/thrift/metrics/ThriftServerMetricsSourceFactoryImpl.java hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/thrift/metrics/ThriftServerMetricsSourceFactoryImpl.java
deleted file mode 100644
index 803c657..0000000
--- hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/thrift/metrics/ThriftServerMetricsSourceFactoryImpl.java
+++ /dev/null
@@ -1,52 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.thrift.metrics;
-
-/**
- * Class used to create metrics sources for Thrift and Thrift2 servers in hadoop 1's compat
- * library.
- */
-public class ThriftServerMetricsSourceFactoryImpl implements ThriftServerMetricsSourceFactory {
-
- /**
- * A singleton used to make sure that only one thrift metrics source per server type is ever
- * created.
- */
- private static enum FactoryStorage {
- INSTANCE;
- ThriftServerMetricsSourceImpl thriftOne = new ThriftServerMetricsSourceImpl(METRICS_NAME,
- METRICS_DESCRIPTION,
- THRIFT_ONE_METRICS_CONTEXT,
- THRIFT_ONE_JMX_CONTEXT);
- ThriftServerMetricsSourceImpl thriftTwo = new ThriftServerMetricsSourceImpl(METRICS_NAME,
- METRICS_DESCRIPTION,
- THRIFT_TWO_METRICS_CONTEXT,
- THRIFT_TWO_JMX_CONTEXT);
- }
-
- @Override
- public ThriftServerMetricsSource createThriftOneSource() {
- return FactoryStorage.INSTANCE.thriftOne;
- }
-
- @Override
- public ThriftServerMetricsSource createThriftTwoSource() {
- return FactoryStorage.INSTANCE.thriftTwo;
- }
-}
diff --git hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/thrift/metrics/ThriftServerMetricsSourceImpl.java hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/thrift/metrics/ThriftServerMetricsSourceImpl.java
deleted file mode 100644
index 7e5d0c4..0000000
--- hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/thrift/metrics/ThriftServerMetricsSourceImpl.java
+++ /dev/null
@@ -1,97 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.thrift.metrics;
-
-import org.apache.hadoop.hbase.metrics.BaseMetricsSourceImpl;
-import org.apache.hadoop.hbase.thrift.metrics.ThriftServerMetricsSource;
-import org.apache.hadoop.metrics2.lib.MetricMutableGaugeLong;
-import org.apache.hadoop.metrics2.lib.MetricMutableStat;
-
-/**
- * Hadoop 1 version of ThriftServerMetricsSource{@link ThriftServerMetricsSource}
- */
-public class ThriftServerMetricsSourceImpl extends BaseMetricsSourceImpl implements
- ThriftServerMetricsSource {
-
-
- private MetricMutableStat batchGetStat;
- private MetricMutableStat batchMutateStat;
- private MetricMutableStat queueTimeStat;
-
- private MetricMutableStat thriftCallStat;
- private MetricMutableStat thriftSlowCallStat;
-
- private MetricMutableGaugeLong callQueueLenGauge;
-
- public ThriftServerMetricsSourceImpl(String metricsName,
- String metricsDescription,
- String metricsContext,
- String metricsJmxContext) {
- super(metricsName, metricsDescription, metricsContext, metricsJmxContext);
- }
-
-
- @Override
- public void init() {
- super.init();
- batchGetStat = getMetricsRegistry().newStat(BATCH_GET_KEY, "", "Keys", "Ops");
- batchMutateStat = getMetricsRegistry().newStat(BATCH_MUTATE_KEY, "", "Keys", "Ops");
- queueTimeStat = getMetricsRegistry().newStat(TIME_IN_QUEUE_KEY);
- thriftCallStat = getMetricsRegistry().newStat(THRIFT_CALL_KEY);
- thriftSlowCallStat = getMetricsRegistry().newStat(SLOW_THRIFT_CALL_KEY);
- callQueueLenGauge = getMetricsRegistry().getLongGauge(CALL_QUEUE_LEN_KEY, 0);
- }
-
- @Override
- public void incTimeInQueue(long time) {
- queueTimeStat.add(time);
- }
-
- @Override
- public void setCallQueueLen(int len) {
- callQueueLenGauge.set(len);
- }
-
- @Override
- public void incNumRowKeysInBatchGet(int diff) {
- batchGetStat.add(diff);
- }
-
- @Override
- public void incNumRowKeysInBatchMutate(int diff) {
- batchMutateStat.add(diff);
- }
-
- @Override
- public void incMethodTime(String name, long time) {
- MetricMutableStat s = getMetricsRegistry().newStat(name);
- s.add(time);
- }
-
- @Override
- public void incCall(long time) {
- thriftCallStat.add(time);
- }
-
- @Override
- public void incSlowCall(long time) {
- thriftSlowCallStat.add(time);
- }
-
-}
diff --git hbase-hadoop1-compat/src/main/java/org/apache/hadoop/metrics2/impl/JmxCacheBuster.java hbase-hadoop1-compat/src/main/java/org/apache/hadoop/metrics2/impl/JmxCacheBuster.java
new file mode 100644
index 0000000..63a773c
--- /dev/null
+++ hbase-hadoop1-compat/src/main/java/org/apache/hadoop/metrics2/impl/JmxCacheBuster.java
@@ -0,0 +1,42 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.metrics2.impl;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+
+
+public class JmxCacheBuster {
+ private static final Log LOG = LogFactory.getLog(JmxCacheBuster.class);
+
+ public static void clearJmxCache() {
+ LOG.trace("Clearing JMX mbean cache.");
+
+ // This is pretty extreme but it's the best way that
+ // I could find to get metrics to be removed.
+
+ try {
+ DefaultMetricsSystem.INSTANCE.stop();
+ DefaultMetricsSystem.INSTANCE.start();
+    } catch (Exception exception) {
+      LOG.debug("Error clearing the JMX cache; the metrics system may not have been started.", exception);
+ }
+ }
+}
diff --git hbase-hadoop1-compat/src/main/java/org/apache/hadoop/metrics2/lib/DynamicMetricsRegistry.java hbase-hadoop1-compat/src/main/java/org/apache/hadoop/metrics2/lib/DynamicMetricsRegistry.java
index 04fb2a9..3f0bc47 100644
--- hbase-hadoop1-compat/src/main/java/org/apache/hadoop/metrics2/lib/DynamicMetricsRegistry.java
+++ hbase-hadoop1-compat/src/main/java/org/apache/hadoop/metrics2/lib/DynamicMetricsRegistry.java
@@ -23,6 +23,8 @@ import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.metrics2.MetricsException;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.metrics2.MetricsTag;
@@ -39,6 +41,8 @@ import org.apache.hadoop.metrics2.MetricsTag;
*/
public class DynamicMetricsRegistry {
+ private final Log LOG = LogFactory.getLog(this.getClass());
+
/** key for the context tag */
public static final String CONTEXT_KEY = "context";
/** description for the context tag */
@@ -284,6 +288,7 @@ public class DynamicMetricsRegistry {
* @param all get all the metrics even if the values are not changed.
*/
public void snapshot(MetricsRecordBuilder builder, boolean all) {
+
for (Entry<String, MetricsTag> entry : tags()) {
builder.add(entry.getValue());
}
diff --git hbase-hadoop1-compat/src/main/java/org/apache/hadoop/metrics2/lib/MetricMutableHistogram.java hbase-hadoop1-compat/src/main/java/org/apache/hadoop/metrics2/lib/MetricMutableHistogram.java
index 166af08..b7c24dd 100644
--- hbase-hadoop1-compat/src/main/java/org/apache/hadoop/metrics2/lib/MetricMutableHistogram.java
+++ hbase-hadoop1-compat/src/main/java/org/apache/hadoop/metrics2/lib/MetricMutableHistogram.java
@@ -21,9 +21,8 @@ package org.apache.hadoop.metrics2.lib;
import com.yammer.metrics.stats.ExponentiallyDecayingSample;
import com.yammer.metrics.stats.Sample;
import com.yammer.metrics.stats.Snapshot;
-import org.apache.hadoop.metrics.MetricHistogram;
+import org.apache.hadoop.metrics2.MetricHistogram;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
-import org.apache.hadoop.metrics2.lib.MetricMutable;
import java.util.concurrent.atomic.AtomicLong;
diff --git hbase-hadoop1-compat/src/main/java/org/apache/hadoop/metrics2/lib/MetricMutableQuantiles.java hbase-hadoop1-compat/src/main/java/org/apache/hadoop/metrics2/lib/MetricMutableQuantiles.java
index 7f4b71b..e80095f 100644
--- hbase-hadoop1-compat/src/main/java/org/apache/hadoop/metrics2/lib/MetricMutableQuantiles.java
+++ hbase-hadoop1-compat/src/main/java/org/apache/hadoop/metrics2/lib/MetricMutableQuantiles.java
@@ -20,8 +20,8 @@ package org.apache.hadoop.metrics2.lib;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.metrics.MetricHistogram;
-import org.apache.hadoop.metrics.MetricsExecutor;
+import org.apache.hadoop.metrics2.MetricHistogram;
+import org.apache.hadoop.metrics2.MetricsExecutor;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.metrics2.util.MetricQuantile;
import org.apache.hadoop.metrics2.util.MetricSampleQuantiles;
diff --git hbase-hadoop1-compat/src/main/java/org/apache/hadoop/metrics2/lib/MetricsExecutorImpl.java hbase-hadoop1-compat/src/main/java/org/apache/hadoop/metrics2/lib/MetricsExecutorImpl.java
index 3135758..d47912c 100644
--- hbase-hadoop1-compat/src/main/java/org/apache/hadoop/metrics2/lib/MetricsExecutorImpl.java
+++ hbase-hadoop1-compat/src/main/java/org/apache/hadoop/metrics2/lib/MetricsExecutorImpl.java
@@ -18,7 +18,7 @@
package org.apache.hadoop.metrics2.lib;
-import org.apache.hadoop.metrics.MetricsExecutor;
+import org.apache.hadoop.metrics2.MetricsExecutor;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledThreadPoolExecutor;
diff --git hbase-hadoop1-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.master.MetricsMasterSourceFactory hbase-hadoop1-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.master.MetricsMasterSourceFactory
new file mode 100644
index 0000000..a5e43e4
--- /dev/null
+++ hbase-hadoop1-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.master.MetricsMasterSourceFactory
@@ -0,0 +1 @@
+org.apache.hadoop.hbase.master.MetricsMasterSourceFactoryImpl
\ No newline at end of file
diff --git hbase-hadoop1-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.master.metrics.MasterMetricsSourceFactory hbase-hadoop1-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.master.metrics.MasterMetricsSourceFactory
deleted file mode 100644
index e81c3dc..0000000
--- hbase-hadoop1-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.master.metrics.MasterMetricsSourceFactory
+++ /dev/null
@@ -1 +0,0 @@
-org.apache.hadoop.hbase.master.metrics.MasterMetricsSourceFactoryImpl
diff --git hbase-hadoop1-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.regionserver.MetricsRegionServerSourceFactory hbase-hadoop1-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.regionserver.MetricsRegionServerSourceFactory
new file mode 100644
index 0000000..bc2f643
--- /dev/null
+++ hbase-hadoop1-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.regionserver.MetricsRegionServerSourceFactory
@@ -0,0 +1 @@
+org.apache.hadoop.hbase.regionserver.MetricsRegionServerSourceFactoryImpl
diff --git hbase-hadoop1-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSource hbase-hadoop1-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSource
new file mode 100644
index 0000000..1e0dd20
--- /dev/null
+++ hbase-hadoop1-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSource
@@ -0,0 +1 @@
+org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSourceImpl
\ No newline at end of file
diff --git hbase-hadoop1-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.replication.regionserver.metrics.ReplicationMetricsSource hbase-hadoop1-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.replication.regionserver.metrics.ReplicationMetricsSource
deleted file mode 100644
index bb64ad5..0000000
--- hbase-hadoop1-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.replication.regionserver.metrics.ReplicationMetricsSource
+++ /dev/null
@@ -1 +0,0 @@
-org.apache.hadoop.hbase.replication.regionserver.metrics.ReplicationMetricsSourceImpl
\ No newline at end of file
diff --git hbase-hadoop1-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.rest.MetricsRESTSource hbase-hadoop1-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.rest.MetricsRESTSource
new file mode 100644
index 0000000..5a4a8e9
--- /dev/null
+++ hbase-hadoop1-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.rest.MetricsRESTSource
@@ -0,0 +1 @@
+org.apache.hadoop.hbase.rest.MetricsRESTSourceImpl
\ No newline at end of file
diff --git hbase-hadoop1-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.rest.metrics.RESTMetricsSource hbase-hadoop1-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.rest.metrics.RESTMetricsSource
deleted file mode 100644
index 9e7a28d..0000000
--- hbase-hadoop1-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.rest.metrics.RESTMetricsSource
+++ /dev/null
@@ -1 +0,0 @@
-org.apache.hadoop.hbase.rest.metrics.RESTMetricsSourceImpl
\ No newline at end of file
diff --git hbase-hadoop1-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.thrift.MetricsThriftServerSourceFactory hbase-hadoop1-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.thrift.MetricsThriftServerSourceFactory
new file mode 100644
index 0000000..2b5c163
--- /dev/null
+++ hbase-hadoop1-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.thrift.MetricsThriftServerSourceFactory
@@ -0,0 +1 @@
+org.apache.hadoop.hbase.thrift.MetricsThriftServerSourceFactoryImpl
\ No newline at end of file
diff --git hbase-hadoop1-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.thrift.metrics.ThriftServerMetricsSourceFactory hbase-hadoop1-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.thrift.metrics.ThriftServerMetricsSourceFactory
deleted file mode 100644
index 62d1c6a..0000000
--- hbase-hadoop1-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.thrift.metrics.ThriftServerMetricsSourceFactory
+++ /dev/null
@@ -1 +0,0 @@
-org.apache.hadoop.hbase.thrift.metrics.ThriftServerMetricsSourceFactoryImpl
\ No newline at end of file
diff --git hbase-hadoop1-compat/src/main/resources/META-INF/services/org.apache.hadoop.metrics2.MetricsExecutor hbase-hadoop1-compat/src/main/resources/META-INF/services/org.apache.hadoop.metrics2.MetricsExecutor
new file mode 100644
index 0000000..dc12052
--- /dev/null
+++ hbase-hadoop1-compat/src/main/resources/META-INF/services/org.apache.hadoop.metrics2.MetricsExecutor
@@ -0,0 +1 @@
+org.apache.hadoop.metrics2.lib.MetricsExecutorImpl
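Reviewer aside: each of these one-line META-INF/services files is a plain java.util.ServiceLoader provider registration. The file name is the service interface and the file body is the implementation class to load, which is what the "hooked up to ServiceLoader" tests below exercise through CompatibilitySingletonFactory. A minimal, self-contained sketch of that lookup; the Greeter interface and GreeterSketch class are hypothetical names, not part of this patch:

import java.util.Iterator;
import java.util.ServiceLoader;

// Hypothetical service interface; stands in for e.g. MetricsExecutor above.
interface Greeter {
  String greet();
}

public class GreeterSketch {
  public static void main(String[] args) {
    // ServiceLoader scans the classpath for files named
    // META-INF/services/<fully.qualified.InterfaceName> and instantiates
    // each implementation class listed inside them.
    ServiceLoader<Greeter> loader = ServiceLoader.load(Greeter.class);
    Iterator<Greeter> it = loader.iterator();
    if (it.hasNext()) {
      System.out.println(it.next().greet());
    } else {
      // No META-INF/services/Greeter file was found on the classpath.
      System.out.println("no provider registered");
    }
  }
}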
diff --git hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/master/TestMasterMetricsSourceImpl.java hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/master/TestMasterMetricsSourceImpl.java
new file mode 100644
index 0000000..0db47f9
--- /dev/null
+++ hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/master/TestMasterMetricsSourceImpl.java
@@ -0,0 +1,44 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.master;
+
+import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
+import org.apache.hadoop.hbase.master.MetricsMasterSource;
+import org.apache.hadoop.hbase.master.MetricsMasterSourceFactory;
+import org.apache.hadoop.hbase.master.MetricsMasterSourceImpl;
+import org.junit.Test;
+
+import static org.junit.Assert.assertSame;
+import static org.junit.Assert.assertTrue;
+
+/**
+ * Test for MetricsMasterSourceImpl
+ */
+public class TestMasterMetricsSourceImpl {
+
+ @Test
+ public void testGetInstance() throws Exception {
+ MetricsMasterSourceFactory metricsMasterSourceFactory = CompatibilitySingletonFactory
+ .getInstance(MetricsMasterSourceFactory.class);
+ MetricsMasterSource masterSource = metricsMasterSourceFactory.create(null);
+ assertTrue(masterSource instanceof MetricsMasterSourceImpl);
+ assertSame(metricsMasterSourceFactory, CompatibilitySingletonFactory.getInstance(MetricsMasterSourceFactory.class));
+ }
+
+}
diff --git hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/master/metrics/TestMasterMetricsSourceImpl.java hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/master/metrics/TestMasterMetricsSourceImpl.java
deleted file mode 100644
index fe384d7..0000000
--- hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/master/metrics/TestMasterMetricsSourceImpl.java
+++ /dev/null
@@ -1,41 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.master.metrics;
-
-import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
-import org.junit.Test;
-
-import static org.junit.Assert.assertSame;
-import static org.junit.Assert.assertTrue;
-
-/**
- * Test for MasterMetricsSourceImpl
- */
-public class TestMasterMetricsSourceImpl {
-
- @Test
- public void testGetInstance() throws Exception {
- MasterMetricsSourceFactory masterMetricsSourceFactory = CompatibilitySingletonFactory
- .getInstance(MasterMetricsSourceFactory.class);
- MasterMetricsSource masterMetricsSource = masterMetricsSourceFactory.create(null);
- assertTrue(masterMetricsSource instanceof MasterMetricsSourceImpl);
- assertSame(masterMetricsSourceFactory, CompatibilitySingletonFactory.getInstance(MasterMetricsSourceFactory.class));
- }
-
-}
diff --git hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/metrics/TestBaseMetricsSourceImplTest.java hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/metrics/TestBaseMetricsSourceImplTest.java
index 095cb14..cd4fb5f 100644
--- hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/metrics/TestBaseMetricsSourceImplTest.java
+++ hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/metrics/TestBaseMetricsSourceImplTest.java
@@ -28,15 +28,15 @@ import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertSame;
/**
- * Test of the default BaseMetricsSource implementation for hadoop 1
+ * Test of the default BaseSource implementation for hadoop 1
*/
public class TestBaseMetricsSourceImplTest {
- private static BaseMetricsSourceImpl bmsi;
+ private static BaseSourceImpl bmsi;
@BeforeClass
public static void setUp() throws Exception {
- bmsi = new BaseMetricsSourceImpl("TestName", "test description", "testcontext", "TestContext");
+ bmsi = new BaseSourceImpl("TestName", "test description", "testcontext", "TestContext");
}
@Test
@@ -81,17 +81,11 @@ public class TestBaseMetricsSourceImplTest {
}
@Test
- public void testRemoveGauge() throws Exception {
+ public void testRemoveMetric() throws Exception {
bmsi.setGauge("testrm", 100);
- bmsi.removeGauge("testrm");
+ bmsi.removeMetric("testrm");
assertNull(bmsi.metricsRegistry.get("testrm"));
}
- @Test
- public void testRemoveCounter() throws Exception {
- bmsi.incCounters("testrm", 100);
- bmsi.removeCounter("testrm");
- assertNull(bmsi.metricsRegistry.get("testrm"));
- }
}
diff --git hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/regionserver/RegionServerMetricsSourceImplTest.java hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/regionserver/RegionServerMetricsSourceImplTest.java
new file mode 100644
index 0000000..b2d8525
--- /dev/null
+++ hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/regionserver/RegionServerMetricsSourceImplTest.java
@@ -0,0 +1,54 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.regionserver;
+
+import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
+import org.apache.hadoop.hbase.regionserver.MetricsRegionServerSource;
+import org.apache.hadoop.hbase.regionserver.MetricsRegionServerSourceFactory;
+import org.apache.hadoop.hbase.regionserver.MetricsRegionServerSourceImpl;
+import org.junit.Test;
+
+import static org.junit.Assert.assertSame;
+import static org.junit.Assert.assertTrue;
+
+/**
+ * Test for MetricsRegionServerSourceImpl
+ */
+public class RegionServerMetricsSourceImplTest {
+
+ @Test
+ public void testGetInstance() throws Exception {
+ MetricsRegionServerSourceFactory metricsRegionServerSourceFactory =
+ CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class);
+ MetricsRegionServerSource generalSource =
+ metricsRegionServerSourceFactory.createGeneral(null);
+ assertTrue(generalSource instanceof MetricsRegionServerSourceImpl);
+ assertSame(metricsRegionServerSourceFactory,
+ CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class));
+ }
+
+
+ @Test(expected = RuntimeException.class)
+ public void testNoGetRegionServerMetricsSourceImpl() throws Exception {
+ // This should throw an exception because MetricsRegionServerSourceImpl should only
+ // be created by a factory.
+ CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceImpl.class);
+ }
+
+}
diff --git hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationMetricsSourceImpl.java hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationMetricsSourceImpl.java
new file mode 100644
index 0000000..dd1c3a7
--- /dev/null
+++ hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationMetricsSourceImpl.java
@@ -0,0 +1,39 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.replication.regionserver;
+
+import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
+import org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSource;
+import org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSourceImpl;
+import org.junit.Test;
+
+import static org.junit.Assert.assertTrue;
+
+/**
+ * Test to make sure that MetricsReplicationSourceImpl is hooked up to ServiceLoader
+ */
+public class TestReplicationMetricsSourceImpl {
+
+ @Test
+ public void testGetInstance() throws Exception {
+ MetricsReplicationSource rms = CompatibilitySingletonFactory
+ .getInstance(MetricsReplicationSource.class);
+ assertTrue(rms instanceof MetricsReplicationSourceImpl);
+ }
+}
diff --git hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/metrics/TestReplicationMetricsSourceImpl.java hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/metrics/TestReplicationMetricsSourceImpl.java
deleted file mode 100644
index 411d5be..0000000
--- hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/metrics/TestReplicationMetricsSourceImpl.java
+++ /dev/null
@@ -1,37 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.replication.regionserver.metrics;
-
-import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
-import org.junit.Test;
-
-import static org.junit.Assert.assertTrue;
-
-/**
- * Test to make sure that ReplicationMetricsSourceImpl is hooked up to ServiceLoader
- */
-public class TestReplicationMetricsSourceImpl {
-
- @Test
- public void testGetInstance() throws Exception {
- ReplicationMetricsSource rms = CompatibilitySingletonFactory
- .getInstance(ReplicationMetricsSource.class);
- assertTrue(rms instanceof ReplicationMetricsSourceImpl);
- }
-}
diff --git hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/rest/TestRESTMetricsSourceImpl.java hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/rest/TestRESTMetricsSourceImpl.java
new file mode 100644
index 0000000..30ffd6e
--- /dev/null
+++ hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/rest/TestRESTMetricsSourceImpl.java
@@ -0,0 +1,40 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest;
+
+import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
+import org.apache.hadoop.hbase.rest.MetricsRESTSource;
+import org.apache.hadoop.hbase.rest.MetricsRESTSourceImpl;
+import org.junit.Test;
+
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+
+/**
+ * Test for hadoop1's version of MetricsRESTSource
+ */
+public class TestRESTMetricsSourceImpl {
+
+ @Test
+ public void ensureCompatRegistered() throws Exception {
+ assertNotNull(CompatibilitySingletonFactory.getInstance(MetricsRESTSource.class));
+ assertTrue(CompatibilitySingletonFactory.getInstance(MetricsRESTSource.class) instanceof MetricsRESTSourceImpl);
+ }
+
+}
diff --git hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/rest/metrics/TestRESTMetricsSourceImpl.java hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/rest/metrics/TestRESTMetricsSourceImpl.java
deleted file mode 100644
index 3f309eb..0000000
--- hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/rest/metrics/TestRESTMetricsSourceImpl.java
+++ /dev/null
@@ -1,38 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.rest.metrics;
-
-import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
-import org.junit.Test;
-
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-
-/**
- * Test for hadoop1's version of RESTMetricsSource
- */
-public class TestRESTMetricsSourceImpl {
-
- @Test
- public void ensureCompatRegistered() throws Exception {
- assertNotNull(CompatibilitySingletonFactory.getInstance(RESTMetricsSource.class));
- assertTrue(CompatibilitySingletonFactory.getInstance(RESTMetricsSource.class) instanceof RESTMetricsSourceImpl);
- }
-
-}
diff --git hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/test/MetricsAssertHelperImpl.java hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/test/MetricsAssertHelperImpl.java
index 346047c..a54a3ee 100644
--- hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/test/MetricsAssertHelperImpl.java
+++ hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/test/MetricsAssertHelperImpl.java
@@ -18,8 +18,8 @@
package org.apache.hadoop.hbase.test;
-import org.apache.hadoop.hbase.metrics.BaseMetricsSource;
-import org.apache.hadoop.hbase.metrics.BaseMetricsSourceImpl;
+import org.apache.hadoop.hbase.metrics.BaseSource;
+import org.apache.hadoop.hbase.metrics.BaseSourceImpl;
import org.apache.hadoop.metrics2.Metric;
import org.apache.hadoop.metrics2.MetricsBuilder;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
@@ -110,68 +110,68 @@ public class MetricsAssertHelperImpl implements MetricsAssertHelper {
}
@Override
- public void assertTag(String name, String expected, BaseMetricsSource source) {
+ public void assertTag(String name, String expected, BaseSource source) {
getMetrics(source);
String cName = canonicalizeMetricName(name);
assertEquals("Tags should be equal", expected, tags.get(cName));
}
@Override
- public void assertGauge(String name, long expected, BaseMetricsSource source) {
+ public void assertGauge(String name, long expected, BaseSource source) {
long found = getGaugeLong(name, source);
assertEquals("Metrics Should be equal", (long) Long.valueOf(expected), found);
}
@Override
- public void assertGaugeGt(String name, long expected, BaseMetricsSource source) {
+ public void assertGaugeGt(String name, long expected, BaseSource source) {
double found = getGaugeDouble(name, source);
assertTrue(name + " (" + found + ") should be greater than " + expected, found > expected);
}
@Override
- public void assertGaugeLt(String name, long expected, BaseMetricsSource source) {
+ public void assertGaugeLt(String name, long expected, BaseSource source) {
double found = getGaugeDouble(name, source);
assertTrue(name + "(" + found + ") should be less than " + expected, found < expected);
}
@Override
- public void assertGauge(String name, double expected, BaseMetricsSource source) {
+ public void assertGauge(String name, double expected, BaseSource source) {
double found = getGaugeDouble(name, source);
- assertEquals("Metrics Should be equal", (double) Double.valueOf(expected), found);
+ assertEquals("Metrics Should be equal", (double) Double.valueOf(expected), found, 0.01);
}
@Override
- public void assertGaugeGt(String name, double expected, BaseMetricsSource source) {
+ public void assertGaugeGt(String name, double expected, BaseSource source) {
double found = getGaugeDouble(name, source);
assertTrue(name + "(" + found + ") should be greater than " + expected, found > expected);
}
@Override
- public void assertGaugeLt(String name, double expected, BaseMetricsSource source) {
+ public void assertGaugeLt(String name, double expected, BaseSource source) {
double found = getGaugeDouble(name, source);
assertTrue(name + "(" + found + ") should be less than " + expected, found < expected);
}
@Override
- public void assertCounter(String name, long expected, BaseMetricsSource source) {
+ public void assertCounter(String name, long expected, BaseSource source) {
long found = getCounter(name, source);
assertEquals("Metrics Counters should be equal", (long) Long.valueOf(expected), found);
}
@Override
- public void assertCounterGt(String name, long expected, BaseMetricsSource source) {
+ public void assertCounterGt(String name, long expected, BaseSource source) {
long found = getCounter(name, source);
assertTrue(name + " (" + found + ") should be greater than " + expected, found > expected);
}
@Override
- public void assertCounterLt(String name, long expected, BaseMetricsSource source) {
+ public void assertCounterLt(String name, long expected, BaseSource source) {
long found = getCounter(name, source);
assertTrue(name + "(" + found + ") should be less than " + expected, found < expected);
}
@Override
- public long getCounter(String name, BaseMetricsSource source) {
+ public long getCounter(String name, BaseSource source) {
getMetrics(source);
String cName = canonicalizeMetricName(name);
assertNotNull(counters.get(cName));
@@ -179,7 +179,7 @@ public class MetricsAssertHelperImpl implements MetricsAssertHelper {
}
@Override
- public double getGaugeDouble(String name, BaseMetricsSource source) {
+ public double getGaugeDouble(String name, BaseSource source) {
getMetrics(source);
String cName = canonicalizeMetricName(name);
assertNotNull(gauges.get(cName));
@@ -187,7 +187,7 @@ public class MetricsAssertHelperImpl implements MetricsAssertHelper {
}
@Override
- public long getGaugeLong(String name, BaseMetricsSource source) {
+ public long getGaugeLong(String name, BaseSource source) {
getMetrics(source);
String cName = canonicalizeMetricName(name);
assertNotNull(gauges.get(cName));
@@ -200,12 +200,12 @@ public class MetricsAssertHelperImpl implements MetricsAssertHelper {
counters.clear();
}
- private void getMetrics(BaseMetricsSource source) {
+ private void getMetrics(BaseSource source) {
reset();
- if (!(source instanceof BaseMetricsSourceImpl)) {
+ if (!(source instanceof BaseSourceImpl)) {
assertTrue(false);
}
- BaseMetricsSourceImpl impl = (BaseMetricsSourceImpl) source;
+ BaseSourceImpl impl = (BaseSourceImpl) source;
impl.getMetrics(new MockMetricsBuilder(), true);
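Aside on the 0.01 delta added to the double-valued assertGauge above: JUnit 4 deprecates the two-argument assertEquals(double, double) because exact floating point equality is brittle, so the replacement passes the three-argument overload with an explicit tolerance. A tiny self-contained illustration; the class name and values are made up:

import static org.junit.Assert.assertEquals;

public class DeltaAssertSketch {
  public static void main(String[] args) {
    double found = 10.0 / 3.0; // 3.3333333...
    // Passes whenever |expected - found| <= delta; an exact comparison
    // against 3.3333 would fail on rounding alone.
    assertEquals(3.3333, found, 0.01);
    System.out.println("within tolerance");
  }
}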
diff --git hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftServerMetricsSourceFactoryImpl.java hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftServerMetricsSourceFactoryImpl.java
new file mode 100644
index 0000000..c768399
--- /dev/null
+++ hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftServerMetricsSourceFactoryImpl.java
@@ -0,0 +1,55 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.thrift;
+
+import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
+import org.apache.hadoop.hbase.thrift.MetricsThriftServerSourceFactory;
+import org.apache.hadoop.hbase.thrift.MetricsThriftServerSourceFactoryImpl;
+import org.junit.Test;
+
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertSame;
+import static org.junit.Assert.assertTrue;
+
+/**
+ * Test the hadoop 1 version of MetricsThriftServerSourceFactory
+ */
+public class TestThriftServerMetricsSourceFactoryImpl {
+
+ @Test
+ public void testCompatibilityRegistered() throws Exception {
+ assertNotNull(CompatibilitySingletonFactory.getInstance(MetricsThriftServerSourceFactory.class));
+ assertTrue(CompatibilitySingletonFactory.getInstance(MetricsThriftServerSourceFactory.class) instanceof MetricsThriftServerSourceFactoryImpl);
+ }
+
+ @Test
+ public void testCreateThriftOneSource() throws Exception {
+ //Make sure that the factory gives back a singleton.
+ assertSame(new MetricsThriftServerSourceFactoryImpl().createThriftOneSource(),
+ new MetricsThriftServerSourceFactoryImpl().createThriftOneSource());
+
+ }
+
+ @Test
+ public void testCreateThriftTwoSource() throws Exception {
+ //Make sure that the factory gives back a singleton.
+ assertSame(new MetricsThriftServerSourceFactoryImpl().createThriftTwoSource(),
+ new MetricsThriftServerSourceFactoryImpl().createThriftTwoSource());
+ }
+}
diff --git hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/thrift/metrics/TestThriftServerMetricsSourceFactoryImpl.java hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/thrift/metrics/TestThriftServerMetricsSourceFactoryImpl.java
deleted file mode 100644
index c7b362f..0000000
--- hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/thrift/metrics/TestThriftServerMetricsSourceFactoryImpl.java
+++ /dev/null
@@ -1,53 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.thrift.metrics;
-
-import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
-import org.junit.Test;
-
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertSame;
-import static org.junit.Assert.assertTrue;
-
-/**
- * Test the hadoop 1 version of ThriftServerMetricsSourceFactory
- */
-public class TestThriftServerMetricsSourceFactoryImpl {
-
- @Test
- public void testCompatabilityRegistered() throws Exception {
- assertNotNull(CompatibilitySingletonFactory.getInstance(ThriftServerMetricsSourceFactory.class));
- assertTrue(CompatibilitySingletonFactory.getInstance(ThriftServerMetricsSourceFactory.class) instanceof ThriftServerMetricsSourceFactoryImpl);
- }
-
- @Test
- public void testCreateThriftOneSource() throws Exception {
- //Make sure that the factory gives back a singleton.
- assertSame(new ThriftServerMetricsSourceFactoryImpl().createThriftOneSource(),
- new ThriftServerMetricsSourceFactoryImpl().createThriftOneSource());
-
- }
-
- @Test
- public void testCreateThriftTwoSource() throws Exception {
- //Make sure that the factory gives back a singleton.
- assertSame(new ThriftServerMetricsSourceFactoryImpl().createThriftTwoSource(),
- new ThriftServerMetricsSourceFactoryImpl().createThriftTwoSource());
- }
-}
diff --git hbase-hadoop1-compat/src/test/resources/hadoop-metrics2.properties hbase-hadoop1-compat/src/test/resources/hadoop-metrics2.properties
new file mode 100644
index 0000000..e69de29
diff --git hbase-hadoop2-compat/pom.xml hbase-hadoop2-compat/pom.xml
index df8e764..754ea10 100644
--- hbase-hadoop2-compat/pom.xml
+++ hbase-hadoop2-compat/pom.xml
@@ -138,6 +138,10 @@ limitations under the License.
 <groupId>com.yammer.metrics</groupId>
 <artifactId>metrics-core</artifactId>
 </dependency>
+<dependency>
+ <groupId>log4j</groupId>
+ <artifactId>log4j</artifactId>
+</dependency>
diff --git hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceFactoryImpl.java hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceFactoryImpl.java
new file mode 100644
index 0000000..6cb3f43
--- /dev/null
+++ hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceFactoryImpl.java
@@ -0,0 +1,37 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.master;
+
+/**
+ * Factory to create MetricsMasterSource when given a MetricsMasterWrapper
+ */
+public class MetricsMasterSourceFactoryImpl implements MetricsMasterSourceFactory {
+ private static enum FactoryStorage {
+ INSTANCE;
+ MetricsMasterSource masterSource;
+ }
+
+ @Override
+ public synchronized MetricsMasterSource create(MetricsMasterWrapper masterWrapper) {
+ if (FactoryStorage.INSTANCE.masterSource == null) {
+ FactoryStorage.INSTANCE.masterSource = new MetricsMasterSourceImpl(masterWrapper);
+ }
+ return FactoryStorage.INSTANCE.masterSource;
+ }
+}
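Aside: FactoryStorage above is the single-element-enum holder idiom. The JVM guarantees the INSTANCE constant is constructed exactly once, so its field behaves like a lazily-filled global cache, and even two distinct factory objects hand back the same source (the thrift factory tests later in this patch assert exactly that with assertSame). A stripped-down sketch of the idiom; WidgetFactory and Widget are hypothetical:

public class WidgetFactory {
  static class Widget {
  }

  // Single-element enum: INSTANCE is created once by the JVM, so `widget`
  // is shared across every WidgetFactory object.
  private enum Storage {
    INSTANCE;
    Widget widget;
  }

  public synchronized Widget create() {
    if (Storage.INSTANCE.widget == null) {
      Storage.INSTANCE.widget = new Widget();
    }
    return Storage.INSTANCE.widget;
  }

  public static void main(String[] args) {
    // Both factories share the enum-held cache, so this prints true.
    System.out.println(new WidgetFactory().create() == new WidgetFactory().create());
  }
}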
diff --git hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceImpl.java hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceImpl.java
new file mode 100644
index 0000000..5b80fe0
--- /dev/null
+++ hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceImpl.java
@@ -0,0 +1,131 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.master;
+
+import org.apache.hadoop.hbase.master.MetricsMasterSource;
+import org.apache.hadoop.hbase.master.MetricsMasterWrapper;
+import org.apache.hadoop.hbase.metrics.BaseSourceImpl;
+import org.apache.hadoop.metrics2.MetricsCollector;
+import org.apache.hadoop.metrics2.MetricsRecordBuilder;
+import org.apache.hadoop.metrics2.lib.Interns;
+import org.apache.hadoop.metrics2.lib.MutableCounterLong;
+import org.apache.hadoop.metrics2.lib.MutableGaugeLong;
+import org.apache.hadoop.metrics2.lib.MutableHistogram;
+
+/** Hadoop2 implementation of MetricsMasterSource. */
+public class MetricsMasterSourceImpl
+ extends BaseSourceImpl implements MetricsMasterSource {
+
+
+ MutableCounterLong clusterRequestsCounter;
+ MutableGaugeLong ritGauge;
+ MutableGaugeLong ritCountOverThresholdGauge;
+ MutableGaugeLong ritOldestAgeGauge;
+ private final MetricsMasterWrapper masterWrapper;
+ private MutableHistogram splitTimeHisto;
+ private MutableHistogram splitSizeHisto;
+
+ public MetricsMasterSourceImpl(MetricsMasterWrapper masterWrapper) {
+ this(METRICS_NAME,
+ METRICS_DESCRIPTION,
+ METRICS_CONTEXT,
+ METRICS_JMX_CONTEXT,
+ masterWrapper);
+ }
+
+ public MetricsMasterSourceImpl(String metricsName,
+ String metricsDescription,
+ String metricsContext,
+ String metricsJmxContext,
+ MetricsMasterWrapper masterWrapper) {
+ super(metricsName, metricsDescription, metricsContext, metricsJmxContext);
+ this.masterWrapper = masterWrapper;
+
+ }
+
+ @Override
+ public void init() {
+ super.init();
+ clusterRequestsCounter = metricsRegistry.newCounter(CLUSTER_REQUESTS_NAME, "", 0L);
+ ritGauge = metricsRegistry.newGauge(RIT_COUNT_NAME, "", 0L);
+ ritCountOverThresholdGauge = metricsRegistry.newGauge(RIT_COUNT_OVER_THRESHOLD_NAME, "", 0L);
+ ritOldestAgeGauge = metricsRegistry.newGauge(RIT_OLDEST_AGE_NAME, "", 0L);
+ splitTimeHisto = metricsRegistry.newHistogram(SPLIT_TIME_NAME, SPLIT_TIME_DESC);
+ splitSizeHisto = metricsRegistry.newHistogram(SPLIT_SIZE_NAME, SPLIT_SIZE_DESC);
+ }
+
+ public void incRequests(final int inc) {
+ this.clusterRequestsCounter.incr(inc);
+ }
+
+ public void setRIT(int ritCount) {
+ ritGauge.set(ritCount);
+ }
+
+ public void setRITCountOverThreshold(int ritCount) {
+ ritCountOverThresholdGauge.set(ritCount);
+ }
+
+ public void setRITOldestAge(long ritCount) {
+ ritOldestAgeGauge.set(ritCount);
+ }
+
+ @Override
+ public void updateSplitTime(long time) {
+ splitTimeHisto.add(time);
+ }
+
+ @Override
+ public void updateSplitSize(long size) {
+ splitSizeHisto.add(size);
+ }
+
+ @Override
+ public void getMetrics(MetricsCollector metricsCollector, boolean all) {
+
+ MetricsRecordBuilder metricsRecordBuilder = metricsCollector.addRecord(metricsName)
+ .setContext(metricsContext);
+
+ // masterWrapper can be null because this function is called inside of init.
+ if (masterWrapper != null) {
+ metricsRecordBuilder
+ .addGauge(Interns.info(MASTER_ACTIVE_TIME_NAME,
+ MASTER_ACTIVE_TIME_DESC), masterWrapper.getMasterActiveTime())
+ .addGauge(Interns.info(MASTER_START_TIME_NAME,
+ MASTER_START_TIME_DESC), masterWrapper.getMasterStartTime())
+ .addGauge(Interns.info(AVERAGE_LOAD_NAME, AVERAGE_LOAD_DESC),
+ masterWrapper.getAverageLoad())
+ .addGauge(Interns.info(NUM_REGION_SERVERS_NAME,
+ NUMBER_OF_REGION_SERVERS_DESC), masterWrapper.getRegionServers())
+ .addGauge(Interns.info(NUM_DEAD_REGION_SERVERS_NAME,
+ NUMBER_OF_DEAD_REGION_SERVERS_DESC),
+ masterWrapper.getDeadRegionServers())
+ .tag(Interns.info(ZOOKEEPER_QUORUM_NAME, ZOOKEEPER_QUORUM_DESC),
+ masterWrapper.getZookeeperQuorum())
+ .tag(Interns.info(SERVER_NAME_NAME, SERVER_NAME_DESC), masterWrapper.getServerName())
+ .tag(Interns.info(CLUSTER_ID_NAME, CLUSTER_ID_DESC), masterWrapper.getClusterId())
+ .tag(Interns.info(IS_ACTIVE_MASTER_NAME,
+ IS_ACTIVE_MASTER_DESC),
+ String.valueOf(masterWrapper.getIsActiveMaster()));
+ }
+
+ metricsRegistry.snapshot(metricsRecordBuilder, all);
+ }
+
+}
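Aside: note the shape of getMetrics above. Master state is pulled through MetricsMasterWrapper at snapshot time instead of being pushed into gauges as it changes, and the patch's own comment explains the null guard: a collection can run while the wrapper field is still unassigned during construction. A minimal sketch of that pull-on-snapshot pattern; every name here is hypothetical:

// The wrapper exposes live server state; the source reads it only when asked.
interface ServerStateWrapper {
  double getAverageLoad();
  int getRegionServers();
}

public class PullOnSnapshotSketch {
  private final ServerStateWrapper wrapper;

  public PullOnSnapshotSketch(ServerStateWrapper wrapper) {
    this.wrapper = wrapper;
  }

  // Stand-in for getMetrics(MetricsCollector, boolean): values are read at
  // snapshot time, so they can never go stale between collections.
  public String snapshot() {
    if (wrapper == null) { // mirrors the null guard in the real source
      return "(no wrapper yet)";
    }
    return "averageLoad=" + wrapper.getAverageLoad()
        + " regionServers=" + wrapper.getRegionServers();
  }

  public static void main(String[] args) {
    PullOnSnapshotSketch source = new PullOnSnapshotSketch(new ServerStateWrapper() {
      public double getAverageLoad() { return 2.5; }
      public int getRegionServers() { return 4; }
    });
    System.out.println(source.snapshot());
  }
}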
diff --git hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetricsSourceFactoryImpl.java hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetricsSourceFactoryImpl.java
deleted file mode 100644
index 4a17046..0000000
--- hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetricsSourceFactoryImpl.java
+++ /dev/null
@@ -1,37 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.master.metrics;
-
-/**
- * Factory to create MasterMetricsSource when given a MasterMetricsWrapper
- */
-public class MasterMetricsSourceFactoryImpl implements MasterMetricsSourceFactory {
- private static enum FactoryStorage {
- INSTANCE;
- MasterMetricsSource source;
- }
-
- @Override
- public synchronized MasterMetricsSource create(MasterMetricsWrapper beanWrapper) {
- if (FactoryStorage.INSTANCE.source == null ) {
- FactoryStorage.INSTANCE.source = new MasterMetricsSourceImpl(beanWrapper);
- }
- return FactoryStorage.INSTANCE.source;
- }
-}
diff --git hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetricsSourceImpl.java hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetricsSourceImpl.java
deleted file mode 100644
index 90baedd..0000000
--- hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetricsSourceImpl.java
+++ /dev/null
@@ -1,129 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.master.metrics;
-
-import org.apache.hadoop.hbase.metrics.BaseMetricsSourceImpl;
-import org.apache.hadoop.metrics2.MetricsCollector;
-import org.apache.hadoop.metrics2.MetricsRecordBuilder;
-import org.apache.hadoop.metrics2.lib.Interns;
-import org.apache.hadoop.metrics2.lib.MutableCounterLong;
-import org.apache.hadoop.metrics2.lib.MutableGaugeLong;
-import org.apache.hadoop.metrics2.lib.MutableHistogram;
-
-/** Hadoop2 implementation of MasterMetricsSource. */
-public class MasterMetricsSourceImpl
- extends BaseMetricsSourceImpl implements MasterMetricsSource {
-
-
- MutableCounterLong clusterRequestsCounter;
- MutableGaugeLong ritGauge;
- MutableGaugeLong ritCountOverThresholdGauge;
- MutableGaugeLong ritOldestAgeGauge;
- private final MasterMetricsWrapper masterWrapper;
- private MutableHistogram splitTimeHisto;
- private MutableHistogram splitSizeHisto;
-
- public MasterMetricsSourceImpl(MasterMetricsWrapper masterMetricsWrapper) {
- this(METRICS_NAME,
- METRICS_DESCRIPTION,
- METRICS_CONTEXT,
- METRICS_JMX_CONTEXT,
- masterMetricsWrapper);
- }
-
- public MasterMetricsSourceImpl(String metricsName,
- String metricsDescription,
- String metricsContext,
- String metricsJmxContext,
- MasterMetricsWrapper masterWrapper) {
- super(metricsName, metricsDescription, metricsContext, metricsJmxContext);
- this.masterWrapper = masterWrapper;
-
- }
-
- @Override
- public void init() {
- super.init();
- clusterRequestsCounter = metricsRegistry.newCounter(CLUSTER_REQUESTS_NAME, "", 0l);
- ritGauge = metricsRegistry.newGauge(RIT_COUNT_NAME, "", 0l);
- ritCountOverThresholdGauge = metricsRegistry.newGauge(RIT_COUNT_OVER_THRESHOLD_NAME, "", 0l);
- ritOldestAgeGauge = metricsRegistry.newGauge(RIT_OLDEST_AGE_NAME, "", 0l);
- splitTimeHisto = metricsRegistry.newHistogram(SPLIT_SIZE_NAME, SPLIT_SIZE_DESC);
- splitSizeHisto = metricsRegistry.newHistogram(SPLIT_TIME_NAME, SPLIT_TIME_DESC);
- }
-
- public void incRequests(final int inc) {
- this.clusterRequestsCounter.incr(inc);
- }
-
- public void setRIT(int ritCount) {
- ritGauge.set(ritCount);
- }
-
- public void setRITCountOverThreshold(int ritCount) {
- ritCountOverThresholdGauge.set(ritCount);
- }
-
- public void setRITOldestAge(long ritCount) {
- ritOldestAgeGauge.set(ritCount);
- }
-
- @Override
- public void updateSplitTime(long time) {
- splitTimeHisto.add(time);
- }
-
- @Override
- public void updateSplitSize(long size) {
- splitSizeHisto.add(size);
- }
-
- @Override
- public void getMetrics(MetricsCollector metricsCollector, boolean all) {
-
- MetricsRecordBuilder metricsRecordBuilder = metricsCollector.addRecord(metricsName)
- .setContext(metricsContext);
-
- // masterWrapper can be null because this function is called inside of init.
- if (masterWrapper != null) {
- metricsRecordBuilder
- .addGauge(Interns.info(MASTER_ACTIVE_TIME_NAME,
- MASTER_ACTIVE_TIME_DESC), masterWrapper.getMasterActiveTime())
- .addGauge(Interns.info(MASTER_START_TIME_NAME,
- MASTER_START_TIME_DESC), masterWrapper.getMasterStartTime())
- .addGauge(Interns.info(AVERAGE_LOAD_NAME, AVERAGE_LOAD_DESC),
- masterWrapper.getAverageLoad())
- .addGauge(Interns.info(NUM_REGION_SERVERS_NAME,
- NUMBER_OF_REGION_SERVERS_DESC), masterWrapper.getRegionServers())
- .addGauge(Interns.info(NUM_DEAD_REGION_SERVERS_NAME,
- NUMBER_OF_DEAD_REGION_SERVERS_DESC),
- masterWrapper.getDeadRegionServers())
- .tag(Interns.info(ZOOKEEPER_QUORUM_NAME, ZOOKEEPER_QUORUM_DESC),
- masterWrapper.getZookeeperQuorum())
- .tag(Interns.info(SERVER_NAME_NAME, SERVER_NAME_DESC), masterWrapper.getServerName())
- .tag(Interns.info(CLUSTER_ID_NAME, CLUSTER_ID_DESC), masterWrapper.getClusterId())
- .tag(Interns.info(IS_ACTIVE_MASTER_NAME,
- IS_ACTIVE_MASTER_DESC),
- String.valueOf(masterWrapper.getIsActiveMaster()));
- }
-
- metricsRegistry.snapshot(metricsRecordBuilder, true);
- }
-
-}
diff --git hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseMetricsSourceImpl.java hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseMetricsSourceImpl.java
deleted file mode 100644
index 7e37089..0000000
--- hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseMetricsSourceImpl.java
+++ /dev/null
@@ -1,167 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.metrics;
-
-import org.apache.hadoop.metrics2.MetricsCollector;
-import org.apache.hadoop.metrics2.MetricsSource;
-import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
-import org.apache.hadoop.metrics2.lib.DynamicMetricsRegistry;
-import org.apache.hadoop.metrics2.lib.MetricMutableQuantiles;
-import org.apache.hadoop.metrics2.lib.MutableCounterLong;
-import org.apache.hadoop.metrics2.lib.MutableGaugeLong;
-import org.apache.hadoop.metrics2.lib.MutableHistogram;
-import org.apache.hadoop.metrics2.source.JvmMetrics;
-
-/**
- * Hadoop 2 implementation of BaseMetricsSource (using metrics2 framework)
- */
-public class BaseMetricsSourceImpl implements BaseMetricsSource, MetricsSource {
-
- private static enum DefaultMetricsSystemInitializer {
- INSTANCE;
- private boolean inited = false;
- private JvmMetrics jvmMetricsSource;
-
- synchronized void init(String name) {
- if (inited) return;
- inited = true;
- DefaultMetricsSystem.initialize(HBASE_METRICS_SYSTEM_NAME);
- jvmMetricsSource = JvmMetrics.create(name, "", DefaultMetricsSystem.instance());
-
- }
- }
-
- public static final String HBASE_METRICS_SYSTEM_NAME = "hbase";
-
- protected final DynamicMetricsRegistry metricsRegistry;
- protected final String metricsName;
- protected final String metricsDescription;
- protected final String metricsContext;
- protected final String metricsJmxContext;
-
- public BaseMetricsSourceImpl(
- String metricsName,
- String metricsDescription,
- String metricsContext,
- String metricsJmxContext) {
-
- this.metricsName = metricsName;
- this.metricsDescription = metricsDescription;
- this.metricsContext = metricsContext;
- this.metricsJmxContext = metricsJmxContext;
-
- metricsRegistry = new DynamicMetricsRegistry(metricsName).setContext(metricsContext);
- DefaultMetricsSystemInitializer.INSTANCE.init(metricsName);
-
- //Register this instance.
- DefaultMetricsSystem.instance().register(metricsJmxContext, metricsDescription, this);
- init();
-
- }
-
- public void init() {
- this.metricsRegistry.clearMetrics();
- }
-
- /**
- * Set a single gauge to a value.
- *
- * @param gaugeName gauge name
- * @param value the new value of the gauge.
- */
- public void setGauge(String gaugeName, long value) {
- MutableGaugeLong gaugeInt = metricsRegistry.getLongGauge(gaugeName, value);
- gaugeInt.set(value);
- }
-
- /**
- * Add some amount to a gauge.
- *
- * @param gaugeName The name of the gauge to increment.
- * @param delta The amount to increment the gauge by.
- */
- public void incGauge(String gaugeName, long delta) {
- MutableGaugeLong gaugeInt = metricsRegistry.getLongGauge(gaugeName, 0l);
- gaugeInt.incr(delta);
- }
-
- /**
- * Decrease the value of a named gauge.
- *
- * @param gaugeName The name of the gauge.
- * @param delta the ammount to subtract from a gauge value.
- */
- public void decGauge(String gaugeName, long delta) {
- MutableGaugeLong gaugeInt = metricsRegistry.getLongGauge(gaugeName, 0l);
- gaugeInt.decr(delta);
- }
-
- /**
- * Increment a named counter by some value.
- *
- * @param key the name of the counter
- * @param delta the ammount to increment
- */
- public void incCounters(String key, long delta) {
- MutableCounterLong counter = metricsRegistry.getLongCounter(key, 0l);
- counter.incr(delta);
-
- }
-
- @Override
- public void updateHistogram(String name, long value) {
- MutableHistogram histo = metricsRegistry.getHistogram(name);
- histo.add(value);
- }
-
- @Override
- public void updateQuantile(String name, long value) {
- MetricMutableQuantiles histo = metricsRegistry.getQuantile(name);
- histo.add(value);
- }
-
- /**
- * Remove a named gauge.
- *
- * @param key
- */
- public void removeGauge(String key) {
- metricsRegistry.removeMetric(key);
- }
-
- /**
- * Remove a named counter.
- *
- * @param key
- */
- public void removeCounter(String key) {
- metricsRegistry.removeMetric(key);
- }
-
- protected DynamicMetricsRegistry getMetricsRegistry() {
- return metricsRegistry;
- }
-
- @Override
- public void getMetrics(MetricsCollector metricsCollector, boolean all) {
- metricsRegistry.snapshot(metricsCollector.addRecord(metricsRegistry.info()), all);
- }
-
-
-}
diff --git hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseSourceImpl.java hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseSourceImpl.java
new file mode 100644
index 0000000..2077951
--- /dev/null
+++ hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseSourceImpl.java
@@ -0,0 +1,158 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.metrics;
+
+import org.apache.hadoop.metrics2.MetricsCollector;
+import org.apache.hadoop.metrics2.MetricsSource;
+import org.apache.hadoop.metrics2.impl.JmxCacheBuster;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+import org.apache.hadoop.metrics2.lib.DynamicMetricsRegistry;
+import org.apache.hadoop.metrics2.lib.MetricMutableQuantiles;
+import org.apache.hadoop.metrics2.lib.MutableCounterLong;
+import org.apache.hadoop.metrics2.lib.MutableGaugeLong;
+import org.apache.hadoop.metrics2.lib.MutableHistogram;
+import org.apache.hadoop.metrics2.source.JvmMetrics;
+
+/**
+ * Hadoop 2 implementation of BaseSource (using metrics2 framework)
+ */
+public class BaseSourceImpl implements BaseSource, MetricsSource {
+
+ private static enum DefaultMetricsSystemInitializer {
+ INSTANCE;
+ private boolean inited = false;
+ private JvmMetrics jvmMetricsSource;
+
+ synchronized void init(String name) {
+ if (inited) return;
+ inited = true;
+ DefaultMetricsSystem.initialize(HBASE_METRICS_SYSTEM_NAME);
+ jvmMetricsSource = JvmMetrics.create(name, "", DefaultMetricsSystem.instance());
+
+ }
+ }
+
+ protected final DynamicMetricsRegistry metricsRegistry;
+ protected final String metricsName;
+ protected final String metricsDescription;
+ protected final String metricsContext;
+ protected final String metricsJmxContext;
+
+ public BaseSourceImpl(
+ String metricsName,
+ String metricsDescription,
+ String metricsContext,
+ String metricsJmxContext) {
+
+ this.metricsName = metricsName;
+ this.metricsDescription = metricsDescription;
+ this.metricsContext = metricsContext;
+ this.metricsJmxContext = metricsJmxContext;
+
+ metricsRegistry = new DynamicMetricsRegistry(metricsName).setContext(metricsContext);
+ DefaultMetricsSystemInitializer.INSTANCE.init(metricsName);
+
+ //Register this instance.
+ DefaultMetricsSystem.instance().register(metricsJmxContext, metricsDescription, this);
+ init();
+
+ }
+
+ public void init() {
+ this.metricsRegistry.clearMetrics();
+ }
+
+ /**
+ * Set a single gauge to a value.
+ *
+ * @param gaugeName gauge name
+ * @param value the new value of the gauge.
+ */
+ public void setGauge(String gaugeName, long value) {
+ MutableGaugeLong gaugeInt = metricsRegistry.getLongGauge(gaugeName, value);
+ gaugeInt.set(value);
+ }
+
+ /**
+ * Add some amount to a gauge.
+ *
+ * @param gaugeName The name of the gauge to increment.
+ * @param delta The amount to increment the gauge by.
+ */
+ public void incGauge(String gaugeName, long delta) {
+ MutableGaugeLong gaugeInt = metricsRegistry.getLongGauge(gaugeName, 0l);
+ gaugeInt.incr(delta);
+ }
+
+ /**
+ * Decrease the value of a named gauge.
+ *
+ * @param gaugeName The name of the gauge.
+ * @param delta the amount to subtract from the gauge value.
+ */
+ public void decGauge(String gaugeName, long delta) {
+ MutableGaugeLong gaugeInt = metricsRegistry.getLongGauge(gaugeName, 0l);
+ gaugeInt.decr(delta);
+ }
+
+ /**
+ * Increment a named counter by some value.
+ *
+ * @param key the name of the counter
+ * @param delta the amount to increment the counter by
+ */
+ public void incCounters(String key, long delta) {
+ MutableCounterLong counter = metricsRegistry.getLongCounter(key, 0l);
+ counter.incr(delta);
+
+ }
+
+ @Override
+ public void updateHistogram(String name, long value) {
+ MutableHistogram histo = metricsRegistry.getHistogram(name);
+ histo.add(value);
+ }
+
+ @Override
+ public void updateQuantile(String name, long value) {
+ MetricMutableQuantiles histo = metricsRegistry.getQuantile(name);
+ histo.add(value);
+ }
+
+ /**
+ * Remove a named metric (gauge or counter) from this source and invalidate
+ * the JMX cache so the removal becomes visible.
+ *
+ * @param key the name of the metric to remove
+ */
+ public void removeMetric(String key) {
+ metricsRegistry.removeMetric(key);
+ JmxCacheBuster.clearJmxCache();
+ }
+
+ public DynamicMetricsRegistry getMetricsRegistry() {
+ return metricsRegistry;
+ }
+
+ @Override
+ public void getMetrics(MetricsCollector metricsCollector, boolean all) {
+ metricsRegistry.snapshot(metricsCollector.addRecord(metricsRegistry.info()), all);
+ }
+
+
+}
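Aside: beyond the rename, the practical surface of BaseSourceImpl is a set of create-or-update helpers keyed by metric name, all backed by the shared DynamicMetricsRegistry, plus the unified removeMetric. A hedged usage sketch against the class as defined above; the metric names are invented and this assumes the hbase-hadoop2-compat classpath:

import org.apache.hadoop.hbase.metrics.BaseSourceImpl;

public class BaseSourceUsageSketch {
  public static void main(String[] args) {
    // The constructor registers the source with DefaultMetricsSystem under
    // the given JMX context and clears the registry via init().
    BaseSourceImpl source = new BaseSourceImpl(
        "ExampleName", "example description", "examplecontext", "Example");

    source.setGauge("exampleGauge", 42);       // create-or-set a long gauge
    source.incGauge("exampleGauge", 8);        // now 50
    source.incCounters("exampleRequests", 1);  // create-or-increment a counter
    source.updateHistogram("exampleLatencyMs", 13);

    // removeMetric covers gauges and counters alike and busts the JMX cache.
    source.removeMetric("exampleGauge");
  }
}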
diff --git hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MerticsRegionSourceImpl.java hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MerticsRegionSourceImpl.java
new file mode 100644
index 0000000..df2f63e
--- /dev/null
+++ hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MerticsRegionSourceImpl.java
@@ -0,0 +1,158 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.regionserver;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.metrics2.MetricsRecordBuilder;
+import org.apache.hadoop.metrics2.impl.JmxCacheBuster;
+import org.apache.hadoop.metrics2.lib.DynamicMetricsRegistry;
+import org.apache.hadoop.metrics2.lib.Interns;
+import org.apache.hadoop.metrics2.lib.MutableCounterLong;
+
+public class MerticsRegionSourceImpl implements MerticsRegionSource {
+
+ private final MetricsRegionWrapper regionWrapper;
+ private boolean closed = false;
+ private MetricsRegionAggregateSourceImpl agg;
+ private DynamicMetricsRegistry registry;
+ private static final Log LOG = LogFactory.getLog(MerticsRegionSourceImpl.class);
+
+ private String regionNamePrefix;
+ private String regionPutKey;
+ private String regionDeleteKey;
+ private String regionGetKey;
+ private String regionIncrementKey;
+ private String regionAppendKey;
+ private MutableCounterLong regionPut;
+ private MutableCounterLong regionDelete;
+ private MutableCounterLong regionGet;
+ private MutableCounterLong regionIncrement;
+ private MutableCounterLong regionAppend;
+
+
+ public MerticsRegionSourceImpl(MetricsRegionWrapper regionWrapper,
+ MetricsRegionAggregateSourceImpl aggregate) {
+ this.regionWrapper = regionWrapper;
+ agg = aggregate;
+ agg.register(this);
+
+ LOG.debug("Creating new MerticsRegionSourceImpl for table " +
+ regionWrapper.getTableName() +
+ " " +
+ regionWrapper.getRegionName());
+
+ registry = agg.getMetricsRegistry();
+
+ regionNamePrefix = "table." + regionWrapper.getTableName() + "."
+ + "region." + regionWrapper.getRegionName() + ".";
+
+ String suffix = "Count";
+
+ regionPutKey = regionNamePrefix + MetricsRegionServerSource.PUT_KEY + suffix;
+ regionPut = registry.getLongCounter(regionPutKey, 0l);
+
+ regionDeleteKey = regionNamePrefix + MetricsRegionServerSource.DELETE_KEY + suffix;
+ regionDelete = registry.getLongCounter(regionDeleteKey, 0l);
+
+ regionGetKey = regionNamePrefix + MetricsRegionServerSource.GET_KEY + suffix;
+ regionGet = registry.getLongCounter(regionGetKey, 0l);
+
+ regionIncrementKey = regionNamePrefix + MetricsRegionServerSource.INCREMENT_KEY + suffix;
+ regionIncrement = registry.getLongCounter(regionIncrementKey, 0l);
+
+ regionAppendKey = regionNamePrefix + MetricsRegionServerSource.APPEND_KEY + suffix;
+ regionAppend = registry.getLongCounter(regionAppendKey, 0l);
+ }
+
+ @Override
+ public void close() {
+ closed = true;
+ agg.deregister(this);
+
+ LOG.trace("Removing region Metrics: " + regionWrapper.getRegionName());
+ registry.removeMetric(regionPutKey);
+ registry.removeMetric(regionDeleteKey);
+ registry.removeMetric(regionGetKey);
+ registry.removeMetric(regionIncrementKey);
+
+ registry.removeMetric(regionAppendKey);
+
+ JmxCacheBuster.clearJmxCache();
+ }
+
+ @Override
+ public void updatePut() {
+ regionPut.incr();
+ }
+
+ @Override
+ public void updateDelete() {
+ regionDelete.incr();
+ }
+
+ @Override
+ public void updateGet() {
+ regionGet.incr();
+ }
+
+ @Override
+ public void updateIncrement() {
+ regionIncrement.incr();
+ }
+
+ @Override
+ public void updateAppend() {
+ regionAppend.incr();
+ }
+
+ @Override
+ public MetricsRegionAggregateSource getAggregateSource() {
+ return agg;
+ }
+
+ @Override
+ public int compareTo(MerticsRegionSource merticsRegionSource) {
+
+ if (!(merticsRegionSource instanceof MerticsRegionSourceImpl)) {
+ return -1;
+ }
+
+ MerticsRegionSourceImpl impl = (MerticsRegionSourceImpl) merticsRegionSource;
+ return this.regionWrapper.getRegionName()
+ .compareTo(impl.regionWrapper.getRegionName());
+ }
+
+ void snapshot(MetricsRecordBuilder mrb, boolean ignored) {
+ if (closed) return;
+
+ mrb.addGauge(
+ Interns.info(regionNamePrefix + MetricsRegionServerSource.STORE_COUNT,
+ MetricsRegionServerSource.STORE_COUNT_DESC),
+ this.regionWrapper.getNumStores());
+ mrb.addGauge(Interns.info(regionNamePrefix + MetricsRegionServerSource.STOREFILE_COUNT,
+ MetricsRegionServerSource.STOREFILE_COUNT_DESC),
+ this.regionWrapper.getNumStoreFiles());
+ mrb.addGauge(Interns.info(regionNamePrefix + MetricsRegionServerSource.MEMSTORE_SIZE,
+ MetricsRegionServerSource.MEMSTORE_SIZE_DESC),
+ this.regionWrapper.getMemstoreSize());
+ mrb.addGauge(Interns.info(regionNamePrefix + MetricsRegionServerSource.STOREFILE_SIZE,
+ MetricsRegionServerSource.STOREFILE_SIZE_DESC),
+ this.regionWrapper.getStoreFileSize());
+ }
+}
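Taken together, the constructor and close() above give each region a self-contained metrics lifecycle: counters with dynamically built names are registered when the region source is created and removed again when the region goes away. A brief usage sketch, with `wrapper` standing in for a real MetricsRegionWrapper implementation, and the counter names shown assuming the table.<table>.region.<region>.<op>Count pattern built in the constructor:

    MetricsRegionServerSourceFactory factory =
        CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class);

    // One source per region; creating it registers it with the aggregate source.
    MerticsRegionSource source = factory.createRegion(wrapper);

    source.updatePut();   // bumps table.<t>.region.<r>.putCount
    source.updateGet();   // bumps table.<t>.region.<r>.getCount

    // On region close: deregisters from the aggregate, removes the five
    // per-region counters from the registry, and busts the JMX cache.
    source.close();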
diff --git hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionAggregateSourceImpl.java hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionAggregateSourceImpl.java
new file mode 100644
index 0000000..5537457
--- /dev/null
+++ hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionAggregateSourceImpl.java
@@ -0,0 +1,82 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.regionserver;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.metrics.BaseSourceImpl;
+import org.apache.hadoop.metrics2.MetricsCollector;
+import org.apache.hadoop.metrics2.MetricsRecordBuilder;
+
+import java.util.TreeSet;
+
+public class MetricsRegionAggregateSourceImpl extends BaseSourceImpl
+ implements MetricsRegionAggregateSource {
+
+ private final Log LOG = LogFactory.getLog(this.getClass());
+
+ private final TreeSet<MerticsRegionSourceImpl> regionSources =
+ new TreeSet<MerticsRegionSourceImpl>();
+
+ public MetricsRegionAggregateSourceImpl() {
+ this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT);
+ }
+
+
+ public MetricsRegionAggregateSourceImpl(String metricsName,
+ String metricsDescription,
+ String metricsContext,
+ String metricsJmxContext) {
+ super(metricsName, metricsDescription, metricsContext, metricsJmxContext);
+ }
+
+ @Override
+ public void register(MerticsRegionSource merticsRegionSource) {
+ regionSources.add((MerticsRegionSourceImpl) merticsRegionSource);
+ }
+
+ @Override
+ public void deregister(MerticsRegionSource merticsRegionSource) {
+ regionSources.remove(merticsRegionSource);
+ }
+
+ /**
+ * Yes, this is a get method that doesn't return anything. Rather than returning a value,
+ * the Hadoop metrics2 framework expects getMetrics to push the metrics it gathers into
+ * the supplied collector.
+ *
+ * @param collector the collector to push metrics into
+ * @param all push all metrics, regardless of when they last changed
+ */
+ @Override
+ public void getMetrics(MetricsCollector collector, boolean all) {
+
+ MetricsRecordBuilder mrb = collector.addRecord(metricsName)
+ .setContext(metricsContext);
+
+ if (regionSources != null) {
+ for (MerticsRegionSourceImpl regionMetricSource : regionSources) {
+ regionMetricSource.snapshot(mrb, all);
+ }
+ }
+
+ metricsRegistry.snapshot(mrb, all);
+ }
+}
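The Javadoc above describes the metrics2 contract this class implements: getMetrics is a push, not a pull. A minimal, self-contained sketch of that contract (ExampleSource is illustrative, not part of this patch):

    import org.apache.hadoop.metrics2.MetricsCollector;
    import org.apache.hadoop.metrics2.MetricsSource;
    import org.apache.hadoop.metrics2.lib.Interns;

    public class ExampleSource implements MetricsSource {
      @Override
      public void getMetrics(MetricsCollector collector, boolean all) {
        // Nothing is returned; the source writes a record into the collector,
        // exactly as MetricsRegionAggregateSourceImpl does above.
        collector.addRecord("Example")
            .setContext("example")
            .addGauge(Interns.info("exampleGauge", "An example gauge"), 42L);
      }
    }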
diff --git hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceFactoryImpl.java hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceFactoryImpl.java
new file mode 100644
index 0000000..c88118f
--- /dev/null
+++ hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceFactoryImpl.java
@@ -0,0 +1,52 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.regionserver;
+
+/**
+ * Factory to create MetricsRegionServerSource when given a MetricsRegionServerWrapper
+ */
+public class MetricsRegionServerSourceFactoryImpl implements MetricsRegionServerSourceFactory {
+ private static enum FactoryStorage {
+ INSTANCE;
+ private MetricsRegionServerSource generalSource;
+ private MetricsRegionAggregateSourceImpl aggImpl;
+ }
+
+ private synchronized MetricsRegionAggregateSourceImpl getAggregate() {
+ if (FactoryStorage.INSTANCE.aggImpl == null) {
+ FactoryStorage.INSTANCE.aggImpl = new MetricsRegionAggregateSourceImpl();
+ }
+ return FactoryStorage.INSTANCE.aggImpl;
+ }
+
+
+ @Override
+ public synchronized MetricsRegionServerSource createGeneral(MetricsRegionServerWrapper regionServerWrapper) {
+ if (FactoryStorage.INSTANCE.generalSource == null) {
+ FactoryStorage.INSTANCE.generalSource = new MetricsRegionServerSourceImpl(
+ regionServerWrapper);
+ }
+ return FactoryStorage.INSTANCE.generalSource;
+ }
+
+ @Override
+ public MerticsRegionSource createRegion(MetricsRegionWrapper wrapper) {
+ return new MerticsRegionSourceImpl(wrapper, getAggregate());
+ }
+}
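The private FactoryStorage enum is the enum-holder singleton idiom: the JVM guarantees a single INSTANCE per class, so every MetricsRegionServerSourceFactoryImpl object hands out the same general source and the same aggregate. One caveat worth noting: the synchronized modifiers above lock the individual factory object, so the guard is only airtight when callers share one factory (which CompatibilitySingletonFactory ensures). A generic sketch of the idiom, with illustrative names, that instead locks the shared holder:

    class Widget {}  // stand-in payload

    public class WidgetFactory {
      private enum Holder {
        INSTANCE;
        private Widget widget;  // shared across every WidgetFactory object
      }

      public Widget create() {
        synchronized (Holder.INSTANCE) {  // lock the shared holder, not `this`
          if (Holder.INSTANCE.widget == null) {
            Holder.INSTANCE.widget = new Widget();
          }
          return Holder.INSTANCE.widget;
        }
      }
    }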
diff --git hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java
new file mode 100644
index 0000000..09b12f2
--- /dev/null
+++ hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java
@@ -0,0 +1,166 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.regionserver;
+
+import org.apache.hadoop.hbase.metrics.BaseSourceImpl;
+import org.apache.hadoop.hbase.regionserver.MetricsRegionServerWrapper;
+import org.apache.hadoop.hbase.regionserver.MetricsRegionServerSource;
+import org.apache.hadoop.metrics2.MetricHistogram;
+import org.apache.hadoop.metrics2.MetricsCollector;
+import org.apache.hadoop.metrics2.MetricsRecordBuilder;
+import org.apache.hadoop.metrics2.lib.Interns;
+
+/**
+ * Hadoop2 implementation of MetricsRegionServerSource.
+ */
+public class MetricsRegionServerSourceImpl
+ extends BaseSourceImpl implements MetricsRegionServerSource {
+
+ final MetricsRegionServerWrapper rsWrap;
+ private final MetricHistogram putHisto;
+ private final MetricHistogram deleteHisto;
+ private final MetricHistogram getHisto;
+ private final MetricHistogram incrementHisto;
+ private final MetricHistogram appendHisto;
+
+ public MetricsRegionServerSourceImpl(MetricsRegionServerWrapper rsWrap) {
+ this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT, rsWrap);
+ }
+
+ public MetricsRegionServerSourceImpl(String metricsName,
+ String metricsDescription,
+ String metricsContext,
+ String metricsJmxContext,
+ MetricsRegionServerWrapper rsWrap) {
+ super(metricsName, metricsDescription, metricsContext, metricsJmxContext);
+ this.rsWrap = rsWrap;
+
+ putHisto = getMetricsRegistry().getHistogram(PUT_KEY);
+ deleteHisto = getMetricsRegistry().getHistogram(DELETE_KEY);
+ getHisto = getMetricsRegistry().getHistogram(GET_KEY);
+ incrementHisto = getMetricsRegistry().getHistogram(INCREMENT_KEY);
+ appendHisto = getMetricsRegistry().getHistogram(APPEND_KEY);
+ }
+
+ @Override
+ public void init() {
+ super.init();
+ }
+
+ @Override
+ public void updatePut(long t) {
+ putHisto.add(t);
+ }
+
+ @Override
+ public void updateDelete(long t) {
+ deleteHisto.add(t);
+ }
+
+ @Override
+ public void updateGet(long t) {
+ getHisto.add(t);
+ }
+
+ @Override
+ public void updateIncrement(long t) {
+ incrementHisto.add(t);
+ }
+
+ @Override
+ public void updateAppend(long t) {
+ appendHisto.add(t);
+ }
+
+ /**
+ * Yes, this is a get method that doesn't return anything. Rather than returning a value,
+ * the Hadoop metrics2 framework expects getMetrics to push the metrics it gathers into
+ * the supplied collector.
+ *
+ * @param metricsCollector the collector to push metrics into
+ * @param all push all metrics, or only those that have changed?
+ */
+ @Override
+ public void getMetrics(MetricsCollector metricsCollector, boolean all) {
+
+ MetricsRecordBuilder mrb = metricsCollector.addRecord(metricsName)
+ .setContext(metricsContext);
+
+ // rsWrap can be null because this function is called inside of init.
+ if (rsWrap != null) {
+ mrb.addGauge(Interns.info(REGION_COUNT, REGION_COUNT_DESC), rsWrap.getNumOnlineRegions())
+ .addGauge(Interns.info(STORE_COUNT, STORE_COUNT_DESC), rsWrap.getNumStores())
+ .addGauge(Interns.info(STOREFILE_COUNT, STOREFILE_COUNT_DESC), rsWrap.getNumStoreFiles())
+ .addGauge(Interns.info(MEMSTORE_SIZE, MEMSTORE_SIZE_DESC), rsWrap.getMemstoreSize())
+ .addGauge(Interns.info(STOREFILE_SIZE, STOREFILE_SIZE_DESC), rsWrap.getStoreFileSize())
+ .addGauge(Interns.info(RS_START_TIME_NAME, RS_START_TIME_DESC),
+ rsWrap.getRegionServerStartTime())
+ .addCounter(Interns.info(TOTAL_REQUEST_COUNT, TOTAL_REQUEST_COUNT_DESC),
+ rsWrap.getTotalRequestCount())
+ .addCounter(Interns.info(READ_REQUEST_COUNT, READ_REQUEST_COUNT_DESC),
+ rsWrap.getReadRequestsCount())
+ .addCounter(Interns.info(WRITE_REQUEST_COUNT, WRITE_REQUEST_COUNT_DESC),
+ rsWrap.getWriteRequestsCount())
+ .addCounter(Interns.info(CHECK_MUTATE_FAILED_COUNT, CHECK_MUTATE_FAILED_COUNT_DESC),
+ rsWrap.getCheckAndMutateChecksFailed())
+ .addCounter(Interns.info(CHECK_MUTATE_PASSED_COUNT, CHECK_MUTATE_PASSED_COUNT_DESC),
+ rsWrap.getCheckAndMutateChecksPassed())
+ .addGauge(Interns.info(STOREFILE_INDEX_SIZE, STOREFILE_INDEX_SIZE_DESC),
+ rsWrap.getStoreFileIndexSize())
+ .addGauge(Interns.info(STATIC_INDEX_SIZE, STATIC_INDEX_SIZE_DESC),
+ rsWrap.getTotalStaticIndexSize())
+ .addGauge(Interns.info(STATIC_BLOOM_SIZE, STATIC_BLOOM_SIZE_DESC),
+ rsWrap.getTotalStaticBloomSize())
+ .addGauge(Interns.info(NUMBER_OF_PUTS_WITHOUT_WAL, NUMBER_OF_PUTS_WITHOUT_WAL_DESC),
+ rsWrap.getNumPutsWithoutWAL())
+ .addGauge(Interns.info(DATA_SIZE_WITHOUT_WAL, DATA_SIZE_WITHOUT_WAL_DESC),
+ rsWrap.getDataInMemoryWithoutWAL())
+ .addGauge(Interns.info(PERCENT_FILES_LOCAL, PERCENT_FILES_LOCAL_DESC),
+ rsWrap.getPercentFileLocal())
+ .addGauge(Interns.info(COMPACTION_QUEUE_LENGTH, COMPACTION_QUEUE_LENGTH_DESC),
+ rsWrap.getCompactionQueueSize())
+ .addGauge(Interns.info(FLUSH_QUEUE_LENGTH, FLUSH_QUEUE_LENGTH_DESC),
+ rsWrap.getFlushQueueSize())
+ .addGauge(Interns.info(BLOCK_CACHE_FREE_SIZE, BLOCK_CACHE_FREE_DESC),
+ rsWrap.getBlockCacheFreeSize())
+ .addGauge(Interns.info(BLOCK_CACHE_COUNT, BLOCK_CACHE_COUNT_DESC),
+ rsWrap.getBlockCacheCount())
+ .addGauge(Interns.info(BLOCK_CACHE_SIZE, BLOCK_CACHE_SIZE_DESC),
+ rsWrap.getBlockCacheSize())
+ .addCounter(Interns.info(BLOCK_CACHE_HIT_COUNT, BLOCK_CACHE_HIT_COUNT_DESC),
+ rsWrap.getBlockCacheHitCount())
+ .addCounter(Interns.info(BLOCK_CACHE_MISS_COUNT, BLOCK_COUNT_MISS_COUNT_DESC),
+ rsWrap.getBlockCacheMissCount())
+ .addCounter(Interns.info(BLOCK_CACHE_EVICTION_COUNT, BLOCK_CACHE_EVICTION_COUNT_DESC),
+ rsWrap.getBlockCacheEvictedCount())
+ .addGauge(Interns.info(BLOCK_CACHE_HIT_PERCENT, BLOCK_CACHE_HIT_PERCENT_DESC),
+ rsWrap.getBlockCacheHitPercent())
+ .addGauge(Interns.info(BLOCK_CACHE_EXPRESS_HIT_PERCENT,
+ BLOCK_CACHE_EXPRESS_HIT_PERCENT_DESC), rsWrap.getBlockCacheHitCachingPercent())
+ .addCounter(Interns.info(UPDATES_BLOCKED_TIME, UPDATES_BLOCKED_DESC),
+ rsWrap.getUpdatesBlockedTime())
+ .tag(Interns.info(ZOOKEEPER_QUORUM_NAME, ZOOKEEPER_QUORUM_DESC),
+ rsWrap.getZookeeperQuorum())
+ .tag(Interns.info(SERVER_NAME_NAME, SERVER_NAME_DESC), rsWrap.getServerName())
+ .tag(Interns.info(CLUSTER_ID_NAME, CLUSTER_ID_DESC), rsWrap.getClusterId());
+ }
+
+ metricsRegistry.snapshot(mrb, all);
+ }
+}
diff --git hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceImpl.java hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceImpl.java
new file mode 100644
index 0000000..754fe33
--- /dev/null
+++ hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceImpl.java
@@ -0,0 +1,42 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.replication.regionserver;
+
+import org.apache.hadoop.hbase.metrics.BaseSourceImpl;
+import org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSource;
+
+/**
+ * Hadoop2 implementation of MetricsReplicationSource. This provides access to metrics gauges and
+ * counters.
+ */
+public class MetricsReplicationSourceImpl extends BaseSourceImpl implements
+ MetricsReplicationSource {
+
+
+ public MetricsReplicationSourceImpl() {
+ this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT);
+ }
+
+ MetricsReplicationSourceImpl(String metricsName,
+ String metricsDescription,
+ String metricsContext,
+ String metricsJmxContext) {
+ super(metricsName, metricsDescription, metricsContext, metricsJmxContext);
+ }
+}
diff --git hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/metrics/ReplicationMetricsSourceImpl.java hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/metrics/ReplicationMetricsSourceImpl.java
deleted file mode 100644
index 3f2a40d..0000000
--- hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/metrics/ReplicationMetricsSourceImpl.java
+++ /dev/null
@@ -1,41 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.replication.regionserver.metrics;
-
-import org.apache.hadoop.hbase.metrics.BaseMetricsSourceImpl;
-
-/**
- * Hadoop2 implementation of ReplicationMetricsSource. This provides access to metrics gauges and
- * counters.
- */
-public class ReplicationMetricsSourceImpl extends BaseMetricsSourceImpl implements
- ReplicationMetricsSource {
-
-
- public ReplicationMetricsSourceImpl() {
- this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT);
- }
-
- ReplicationMetricsSourceImpl(String metricsName,
- String metricsDescription,
- String metricsContext,
- String metricsJmxContext) {
- super(metricsName, metricsDescription, metricsContext, metricsJmxContext);
- }
-}
diff --git hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/rest/MetricsRESTSourceImpl.java hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/rest/MetricsRESTSourceImpl.java
new file mode 100644
index 0000000..a52b14c
--- /dev/null
+++ hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/rest/MetricsRESTSourceImpl.java
@@ -0,0 +1,98 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest;
+
+import org.apache.hadoop.hbase.metrics.BaseSourceImpl;
+import org.apache.hadoop.hbase.rest.MetricsRESTSource;
+import org.apache.hadoop.metrics2.lib.MutableCounterLong;
+
+/**
+ * Hadoop Two implementation of a metrics2 source that will export metrics from the REST
+ * server to the Hadoop metrics2 subsystem.
+ */
+public class MetricsRESTSourceImpl extends BaseSourceImpl implements MetricsRESTSource {
+
+ private MutableCounterLong request;
+ private MutableCounterLong sucGet;
+ private MutableCounterLong sucPut;
+ private MutableCounterLong sucDel;
+ private MutableCounterLong fGet;
+ private MutableCounterLong fPut;
+ private MutableCounterLong fDel;
+
+ public MetricsRESTSourceImpl() {
+ this(METRICS_NAME, METRICS_DESCRIPTION, CONTEXT, JMX_CONTEXT);
+ }
+
+ public MetricsRESTSourceImpl(String metricsName,
+ String metricsDescription,
+ String metricsContext,
+ String metricsJmxContext) {
+ super(metricsName, metricsDescription, metricsContext, metricsJmxContext);
+ }
+
+ @Override
+ public void init() {
+ super.init();
+ request = getMetricsRegistry().getLongCounter(REQUEST_KEY, 0L);
+
+ sucGet = getMetricsRegistry().getLongCounter(SUCCESSFUL_GET_KEY, 0L);
+ sucPut = getMetricsRegistry().getLongCounter(SUCCESSFUL_PUT_KEY, 0L);
+ sucDel = getMetricsRegistry().getLongCounter(SUCCESSFUL_DELETE_KEY, 0L);
+
+ fGet = getMetricsRegistry().getLongCounter(FAILED_GET_KEY, 0L);
+ fPut = getMetricsRegistry().getLongCounter(FAILED_PUT_KEY, 0L);
+ fDel = getMetricsRegistry().getLongCounter(FAILED_DELETE_KEY, 0L);
+ }
+
+ @Override
+ public void incrementRequests(int inc) {
+ request.incr(inc);
+ }
+
+ @Override
+ public void incrementSucessfulGetRequests(int inc) {
+ sucGet.incr(inc);
+ }
+
+ @Override
+ public void incrementSucessfulPutRequests(int inc) {
+ sucPut.incr(inc);
+ }
+
+ @Override
+ public void incrementSucessfulDeleteRequests(int inc) {
+ sucDel.incr(inc);
+ }
+
+ @Override
+ public void incrementFailedGetRequests(int inc) {
+ fGet.incr(inc);
+ }
+
+ @Override
+ public void incrementFailedPutRequests(int inc) {
+ fPut.incr(inc);
+ }
+
+ @Override
+ public void incrementFailedDeleteRequests(int inc) {
+ fDel.incr(inc);
+ }
+}
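All seven counters are created in init() with the registry's get-or-create lookup, so calling init() more than once is harmless: a later call fetches the counters that already exist rather than failing. A short sketch of that assumed get-or-create behavior, written as if inside a BaseSourceImpl subclass:

    // First call registers a new counter starting at 0; later calls with the
    // same key return the existing counter (values are never reset).
    MutableCounterLong requests = getMetricsRegistry().getLongCounter("requests", 0L);
    requests.incr();  // requests == 1

    MutableCounterLong same = getMetricsRegistry().getLongCounter("requests", 0L);
    same.incr();      // same counter object; requests == 2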
diff --git hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/rest/metrics/RESTMetricsSourceImpl.java hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/rest/metrics/RESTMetricsSourceImpl.java
deleted file mode 100644
index a104d36..0000000
--- hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/rest/metrics/RESTMetricsSourceImpl.java
+++ /dev/null
@@ -1,97 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.rest.metrics;
-
-import org.apache.hadoop.hbase.metrics.BaseMetricsSourceImpl;
-import org.apache.hadoop.metrics2.lib.MutableCounterLong;
-
-/**
- * Hadoop Two implementation of a metrics2 source that will export metrics from the Rest server to
- * the hadoop metrics2 subsystem.
- */
-public class RESTMetricsSourceImpl extends BaseMetricsSourceImpl implements RESTMetricsSource {
-
- private MutableCounterLong request;
- private MutableCounterLong sucGet;
- private MutableCounterLong sucPut;
- private MutableCounterLong sucDel;
- private MutableCounterLong fGet;
- private MutableCounterLong fPut;
- private MutableCounterLong fDel;
-
- public RESTMetricsSourceImpl() {
- this(METRICS_NAME, METRICS_DESCRIPTION, CONTEXT, JMX_CONTEXT);
- }
-
- public RESTMetricsSourceImpl(String metricsName,
- String metricsDescription,
- String metricsContext,
- String metricsJmxContext) {
- super(metricsName, metricsDescription, metricsContext, metricsJmxContext);
- }
-
- @Override
- public void init() {
- super.init();
- request = getMetricsRegistry().getLongCounter(REQUEST_KEY, 0l);
-
- sucGet = getMetricsRegistry().getLongCounter(SUCCESSFUL_GET_KEY, 0l);
- sucPut = getMetricsRegistry().getLongCounter(SUCCESSFUL_PUT_KEY, 0l);
- sucDel = getMetricsRegistry().getLongCounter(SUCCESSFUL_DELETE_KEY, 0l);
-
- fGet = getMetricsRegistry().getLongCounter(FAILED_GET_KEY, 0l);
- fPut = getMetricsRegistry().getLongCounter(FAILED_PUT_KEY, 0l);
- fDel = getMetricsRegistry().getLongCounter(FAILED_DELETE_KEY, 0l);
- }
-
- @Override
- public void incrementRequests(int inc) {
- request.incr(inc);
- }
-
- @Override
- public void incrementSucessfulGetRequests(int inc) {
- sucGet.incr(inc);
- }
-
- @Override
- public void incrementSucessfulPutRequests(int inc) {
- sucPut.incr(inc);
- }
-
- @Override
- public void incrementSucessfulDeleteRequests(int inc) {
- sucDel.incr(inc);
- }
-
- @Override
- public void incrementFailedGetRequests(int inc) {
- fGet.incr(inc);
- }
-
- @Override
- public void incrementFailedPutRequests(int inc) {
- fPut.incr(inc);
- }
-
- @Override
- public void incrementFailedDeleteRequests(int inc) {
- fDel.incr(inc);
- }
-}
diff --git hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceFactoryImpl.java hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceFactoryImpl.java
new file mode 100644
index 0000000..b7c037f
--- /dev/null
+++ hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceFactoryImpl.java
@@ -0,0 +1,51 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.thrift;
+
+/**
+ * Class used to create metrics sources for Thrift and Thrift2 servers.
+ */
+public class MetricsThriftServerSourceFactoryImpl implements MetricsThriftServerSourceFactory {
+
+ /**
+ * A singleton used to make sure that only one thrift metrics source per server type is ever
+ * created.
+ */
+ private static enum FactoryStorage {
+ INSTANCE;
+ MetricsThriftServerSourceImpl thriftOne = new MetricsThriftServerSourceImpl(METRICS_NAME,
+ METRICS_DESCRIPTION,
+ THRIFT_ONE_METRICS_CONTEXT,
+ THRIFT_ONE_JMX_CONTEXT);
+ MetricsThriftServerSourceImpl thriftTwo = new MetricsThriftServerSourceImpl(METRICS_NAME,
+ METRICS_DESCRIPTION,
+ THRIFT_TWO_METRICS_CONTEXT,
+ THRIFT_TWO_JMX_CONTEXT);
+ }
+
+ @Override
+ public MetricsThriftServerSource createThriftOneSource() {
+ return FactoryStorage.INSTANCE.thriftOne;
+ }
+
+ @Override
+ public MetricsThriftServerSource createThriftTwoSource() {
+ return FactoryStorage.INSTANCE.thriftTwo;
+ }
+}
diff --git hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceImpl.java hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceImpl.java
new file mode 100644
index 0000000..5cf3d91
--- /dev/null
+++ hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceImpl.java
@@ -0,0 +1,98 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.thrift;
+
+import org.apache.hadoop.hbase.metrics.BaseSourceImpl;
+import org.apache.hadoop.hbase.thrift.MetricsThriftServerSource;
+import org.apache.hadoop.metrics2.lib.MutableGaugeLong;
+import org.apache.hadoop.metrics2.lib.MutableStat;
+
+/**
+ * Hadoop 2 implementation of {@link org.apache.hadoop.hbase.thrift.MetricsThriftServerSource}.
+ */
+public class MetricsThriftServerSourceImpl extends BaseSourceImpl implements
+ MetricsThriftServerSource {
+
+ private MutableStat batchGetStat;
+ private MutableStat batchMutateStat;
+ private MutableStat queueTimeStat;
+
+ private MutableStat thriftCallStat;
+ private MutableStat thriftSlowCallStat;
+
+ private MutableGaugeLong callQueueLenGauge;
+
+ public MetricsThriftServerSourceImpl(String metricsName,
+ String metricsDescription,
+ String metricsContext,
+ String metricsJmxContext) {
+ super(metricsName, metricsDescription, metricsContext, metricsJmxContext);
+ }
+
+ @Override
+ public void init() {
+ super.init();
+ batchGetStat = getMetricsRegistry().newStat(BATCH_GET_KEY, "", "Keys", "Ops");
+ batchMutateStat = getMetricsRegistry().newStat(BATCH_MUTATE_KEY, "", "Keys", "Ops");
+ queueTimeStat = getMetricsRegistry().newRate(TIME_IN_QUEUE_KEY);
+
+ thriftCallStat = getMetricsRegistry().newRate(THRIFT_CALL_KEY);
+ thriftSlowCallStat = getMetricsRegistry().newRate(SLOW_THRIFT_CALL_KEY);
+
+ callQueueLenGauge = getMetricsRegistry().getLongGauge(CALL_QUEUE_LEN_KEY, 0);
+ }
+
+ @Override
+ public void incTimeInQueue(long time) {
+ queueTimeStat.add(time);
+ }
+
+ @Override
+ public void setCallQueueLen(int len) {
+ callQueueLenGauge.set(len);
+ }
+
+ @Override
+ public void incNumRowKeysInBatchGet(int diff) {
+ batchGetStat.add(diff);
+ }
+
+ @Override
+ public void incNumRowKeysInBatchMutate(int diff) {
+ batchMutateStat.add(diff);
+ }
+
+ @Override
+ public void incMethodTime(String name, long time) {
+ MutableStat s = getMetricsRegistry().newRate(name);
+ s.add(time);
+ }
+
+ @Override
+ public void incCall(long time) {
+ thriftCallStat.add(time);
+ }
+
+ @Override
+ public void incSlowCall(long time) {
+ thriftSlowCallStat.add(time);
+ }
+
+}
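incMethodTime is the notable method here: it looks up (or lazily creates) a rate stat named after the Thrift method on every call, so newly added methods get metrics without any pre-registration, assuming newRate has get-or-create semantics as that per-call use implies. A hypothetical caller, with SLOW_CALL_THRESHOLD_MS standing in for whatever slow-call cutoff the server uses:

    long start = System.nanoTime();
    // ... dispatch the Thrift call ...
    long elapsedMs = (System.nanoTime() - start) / 1000000L;

    source.incMethodTime("getRow", elapsedMs);  // "getRow" becomes a metric name
    source.incCall(elapsedMs);
    if (elapsedMs > SLOW_CALL_THRESHOLD_MS) {   // hypothetical threshold
      source.incSlowCall(elapsedMs);
    }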
diff --git hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/thrift/metrics/ThriftServerMetricsSourceFactoryImpl.java hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/thrift/metrics/ThriftServerMetricsSourceFactoryImpl.java
deleted file mode 100644
index 718e4b0..0000000
--- hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/thrift/metrics/ThriftServerMetricsSourceFactoryImpl.java
+++ /dev/null
@@ -1,51 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.thrift.metrics;
-
-/**
- * Class used to create metrics sources for Thrift and Thrift2 servers.
- */
-public class ThriftServerMetricsSourceFactoryImpl implements ThriftServerMetricsSourceFactory {
-
- /**
- * A singleton used to make sure that only one thrift metrics source per server type is ever
- * created.
- */
- private static enum FactoryStorage {
- INSTANCE;
- ThriftServerMetricsSourceImpl thriftOne = new ThriftServerMetricsSourceImpl(METRICS_NAME,
- METRICS_DESCRIPTION,
- THRIFT_ONE_METRICS_CONTEXT,
- THRIFT_ONE_JMX_CONTEXT);
- ThriftServerMetricsSourceImpl thriftTwo = new ThriftServerMetricsSourceImpl(METRICS_NAME,
- METRICS_DESCRIPTION,
- THRIFT_TWO_METRICS_CONTEXT,
- THRIFT_TWO_JMX_CONTEXT);
- }
-
- @Override
- public ThriftServerMetricsSource createThriftOneSource() {
- return FactoryStorage.INSTANCE.thriftOne;
- }
-
- @Override
- public ThriftServerMetricsSource createThriftTwoSource() {
- return FactoryStorage.INSTANCE.thriftTwo;
- }
-}
diff --git hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/thrift/metrics/ThriftServerMetricsSourceImpl.java hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/thrift/metrics/ThriftServerMetricsSourceImpl.java
deleted file mode 100644
index 5c9348f..0000000
--- hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/thrift/metrics/ThriftServerMetricsSourceImpl.java
+++ /dev/null
@@ -1,98 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.thrift.metrics;
-
-import org.apache.hadoop.hbase.metrics.BaseMetricsSourceImpl;
-import org.apache.hadoop.hbase.thrift.metrics.ThriftServerMetricsSource;
-import org.apache.hadoop.metrics2.lib.MutableGaugeLong;
-import org.apache.hadoop.metrics2.lib.MutableStat;
-
-/**
- * Hadoop 2 version of ThriftServerMetricsSource{@link ThriftServerMetricsSource}
- */
-public class ThriftServerMetricsSourceImpl extends BaseMetricsSourceImpl implements
- ThriftServerMetricsSource {
-
- private MutableStat batchGetStat;
- private MutableStat batchMutateStat;
- private MutableStat queueTimeStat;
-
- private MutableStat thriftCallStat;
- private MutableStat thriftSlowCallStat;
-
- private MutableGaugeLong callQueueLenGauge;
-
- public ThriftServerMetricsSourceImpl(String metricsName,
- String metricsDescription,
- String metricsContext,
- String metricsJmxContext) {
- super(metricsName, metricsDescription, metricsContext, metricsJmxContext);
- }
-
- @Override
- public void init() {
- super.init();
- batchGetStat = getMetricsRegistry().newStat(BATCH_GET_KEY, "", "Keys", "Ops");
- batchMutateStat = getMetricsRegistry().newStat(BATCH_MUTATE_KEY, "", "Keys", "Ops");
- queueTimeStat = getMetricsRegistry().newRate(TIME_IN_QUEUE_KEY) ;
-
- thriftCallStat = getMetricsRegistry().newRate(THRIFT_CALL_KEY);
- thriftSlowCallStat = getMetricsRegistry().newRate(SLOW_THRIFT_CALL_KEY);
-
- callQueueLenGauge = getMetricsRegistry().getLongGauge(CALL_QUEUE_LEN_KEY, 0) ;
-
- }
-
- @Override
- public void incTimeInQueue(long time) {
- queueTimeStat.add(time);
- }
-
- @Override
- public void setCallQueueLen(int len) {
- callQueueLenGauge.set(len);
- }
-
- @Override
- public void incNumRowKeysInBatchGet(int diff) {
- batchGetStat.add(diff);
- }
-
- @Override
- public void incNumRowKeysInBatchMutate(int diff) {
- batchMutateStat.add(diff);
- }
-
- @Override
- public void incMethodTime(String name, long time) {
- MutableStat s = getMetricsRegistry().newRate(name);
- s.add(time);
- }
-
- @Override
- public void incCall(long time) {
- thriftCallStat.add(time);
- }
-
- @Override
- public void incSlowCall(long time) {
- thriftSlowCallStat.add(time);
- }
-
-}
diff --git hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/impl/JmxCacheBuster.java hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/impl/JmxCacheBuster.java
new file mode 100644
index 0000000..8512afd
--- /dev/null
+++ hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/impl/JmxCacheBuster.java
@@ -0,0 +1,44 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.metrics2.impl;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+
+
+public class JmxCacheBuster {
+ private static final Log LOG = LogFactory.getLog(JmxCacheBuster.class);
+
+ public static void clearJmxCache() {
+ LOG.trace("Clearing JMX mbean cache.");
+
+ // This is pretty extreme but it's the best way that
+ // I could find to get metrics to be removed.
+ try {
+ if (DefaultMetricsSystem.instance() != null) {
+ DefaultMetricsSystem.instance().stop();
+ DefaultMetricsSystem.instance().start();
+ }
+ } catch (Exception exception) {
+ LOG.debug("Error clearing the JMX cache; the metrics system does not appear to have been started.", exception);
+ }
+ }
+}
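The metrics2 JMX adapter caches MBean attributes, so a metric removed from the registry can linger as a stale JMX attribute; restarting the DefaultMetricsSystem is the blunt instrument that flushes it. A usage sketch mirroring what MerticsRegionSourceImpl.close() does above (the key shown is illustrative):

    // After removing dynamically named metrics, force the MBean attributes
    // to be rebuilt so the removed keys disappear from JMX.
    registry.removeMetric("table.t1.region.abc123.putCount");
    JmxCacheBuster.clearJmxCache();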
diff --git hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/DynamicMetricsRegistry.java hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/DynamicMetricsRegistry.java
index a423893..080bd4d 100644
--- hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/DynamicMetricsRegistry.java
+++ hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/DynamicMetricsRegistry.java
@@ -528,6 +528,7 @@ public class DynamicMetricsRegistry {
return returnExistingWithCast(metric, metricClass, name);
}
+ @SuppressWarnings("unchecked")
private <T extends MutableMetric> T returnExistingWithCast(MutableMetric metric,
Class<T> metricClass, String name) {
if (!metricClass.isAssignableFrom(metric.getClass())) {
diff --git hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MetricMutableQuantiles.java hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MetricMutableQuantiles.java
index 28e92c1..766cf96 100644
--- hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MetricMutableQuantiles.java
+++ hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MetricMutableQuantiles.java
@@ -22,8 +22,8 @@ import com.google.common.annotations.VisibleForTesting;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.metrics.MetricHistogram;
-import org.apache.hadoop.metrics.MetricsExecutor;
+import org.apache.hadoop.metrics2.MetricHistogram;
+import org.apache.hadoop.metrics2.MetricsExecutor;
import org.apache.hadoop.metrics2.MetricsInfo;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.metrics2.util.MetricQuantile;
diff --git hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MetricsExecutorImpl.java hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MetricsExecutorImpl.java
index 3135758..d47912c 100644
--- hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MetricsExecutorImpl.java
+++ hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MetricsExecutorImpl.java
@@ -18,7 +18,7 @@
package org.apache.hadoop.metrics2.lib;
-import org.apache.hadoop.metrics.MetricsExecutor;
+import org.apache.hadoop.metrics2.MetricsExecutor;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledThreadPoolExecutor;
diff --git hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableHistogram.java hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableHistogram.java
index 4fb0be9..3b012e9 100644
--- hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableHistogram.java
+++ hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableHistogram.java
@@ -22,7 +22,7 @@ import com.yammer.metrics.stats.ExponentiallyDecayingSample;
import com.yammer.metrics.stats.Sample;
import com.yammer.metrics.stats.Snapshot;
import org.apache.commons.lang.StringUtils;
-import org.apache.hadoop.metrics.MetricHistogram;
+import org.apache.hadoop.metrics2.MetricHistogram;
import org.apache.hadoop.metrics2.MetricsInfo;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
diff --git hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.master.MetricsMasterSourceFactory hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.master.MetricsMasterSourceFactory
new file mode 100644
index 0000000..a5e43e4
--- /dev/null
+++ hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.master.MetricsMasterSourceFactory
@@ -0,0 +1 @@
+org.apache.hadoop.hbase.master.MetricsMasterSourceFactoryImpl
\ No newline at end of file
diff --git hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.master.metrics.MasterMetricsSourceFactory hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.master.metrics.MasterMetricsSourceFactory
deleted file mode 100644
index e81c3dc..0000000
--- hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.master.metrics.MasterMetricsSourceFactory
+++ /dev/null
@@ -1 +0,0 @@
-org.apache.hadoop.hbase.master.metrics.MasterMetricsSourceFactoryImpl
diff --git hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.regionserver.MetricsRegionServerSourceFactory hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.regionserver.MetricsRegionServerSourceFactory
new file mode 100644
index 0000000..bc2f643
--- /dev/null
+++ hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.regionserver.MetricsRegionServerSourceFactory
@@ -0,0 +1 @@
+org.apache.hadoop.hbase.regionserver.MetricsRegionServerSourceFactoryImpl
diff --git hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSource hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSource
new file mode 100644
index 0000000..1e0dd20
--- /dev/null
+++ hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSource
@@ -0,0 +1 @@
+org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSourceImpl
\ No newline at end of file
diff --git hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.replication.regionserver.metrics.ReplicationMetricsSource hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.replication.regionserver.metrics.ReplicationMetricsSource
deleted file mode 100644
index bb64ad5..0000000
--- hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.replication.regionserver.metrics.ReplicationMetricsSource
+++ /dev/null
@@ -1 +0,0 @@
-org.apache.hadoop.hbase.replication.regionserver.metrics.ReplicationMetricsSourceImpl
\ No newline at end of file
diff --git hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.rest.MetricsRESTSource hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.rest.MetricsRESTSource
new file mode 100644
index 0000000..5a4a8e9
--- /dev/null
+++ hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.rest.MetricsRESTSource
@@ -0,0 +1 @@
+org.apache.hadoop.hbase.rest.MetricsRESTSourceImpl
\ No newline at end of file
diff --git hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.rest.metrics.RESTMetricsSource hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.rest.metrics.RESTMetricsSource
deleted file mode 100644
index 9e7a28d..0000000
--- hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.rest.metrics.RESTMetricsSource
+++ /dev/null
@@ -1 +0,0 @@
-org.apache.hadoop.hbase.rest.metrics.RESTMetricsSourceImpl
\ No newline at end of file
diff --git hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.thrift.MetricsThriftServerSourceFactory hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.thrift.MetricsThriftServerSourceFactory
new file mode 100644
index 0000000..2b5c163
--- /dev/null
+++ hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.thrift.MetricsThriftServerSourceFactory
@@ -0,0 +1 @@
+org.apache.hadoop.hbase.thrift.MetricsThriftServerSourceFactoryImpl
\ No newline at end of file
diff --git hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.thrift.metrics.ThriftServerMetricsSourceFactory hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.thrift.metrics.ThriftServerMetricsSourceFactory
deleted file mode 100644
index 62d1c6a..0000000
--- hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.thrift.metrics.ThriftServerMetricsSourceFactory
+++ /dev/null
@@ -1 +0,0 @@
-org.apache.hadoop.hbase.thrift.metrics.ThriftServerMetricsSourceFactoryImpl
\ No newline at end of file
diff --git hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.metrics2.MetricsExecutor hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.metrics2.MetricsExecutor
new file mode 100644
index 0000000..dc12052
--- /dev/null
+++ hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.metrics2.MetricsExecutor
@@ -0,0 +1 @@
+org.apache.hadoop.metrics2.lib.MetricsExecutorImpl
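Each of the META-INF/services files above is a standard java.util.ServiceLoader registration: a file named after the interface whose single line names the implementation class to instantiate. A self-contained sketch of that resolution, assuming the compatibility-factory lookup ultimately rides on ServiceLoader:

    import java.util.ServiceLoader;
    import org.apache.hadoop.hbase.regionserver.MetricsRegionServerSourceFactory;

    public class ServiceLoaderSketch {
      public static void main(String[] args) {
        // Reads the META-INF/services file named after the interface and
        // instantiates the class listed inside it.
        ServiceLoader<MetricsRegionServerSourceFactory> loader =
            ServiceLoader.load(MetricsRegionServerSourceFactory.class);
        MetricsRegionServerSourceFactory factory = loader.iterator().next();
        System.out.println(factory.getClass().getName());
        // -> ...MetricsRegionServerSourceFactoryImpl, per the entry above
      }
    }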
diff --git hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/master/TestMasterMetricsSourceImpl.java hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/master/TestMasterMetricsSourceImpl.java
new file mode 100644
index 0000000..0db47f9
--- /dev/null
+++ hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/master/TestMasterMetricsSourceImpl.java
@@ -0,0 +1,44 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.master;
+
+import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
+import org.apache.hadoop.hbase.master.MetricsMasterSource;
+import org.apache.hadoop.hbase.master.MetricsMasterSourceFactory;
+import org.apache.hadoop.hbase.master.MetricsMasterSourceImpl;
+import org.junit.Test;
+
+import static org.junit.Assert.assertSame;
+import static org.junit.Assert.assertTrue;
+
+/**
+ * Test for MetricsMasterSourceImpl
+ */
+public class TestMasterMetricsSourceImpl {
+
+ @Test
+ public void testGetInstance() throws Exception {
+ MetricsMasterSourceFactory metricsMasterSourceFactory = CompatibilitySingletonFactory
+ .getInstance(MetricsMasterSourceFactory.class);
+ MetricsMasterSource masterSource = metricsMasterSourceFactory.create(null);
+ assertTrue(masterSource instanceof MetricsMasterSourceImpl);
+ assertSame(metricsMasterSourceFactory, CompatibilitySingletonFactory.getInstance(MetricsMasterSourceFactory.class));
+ }
+
+}
diff --git hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/master/metrics/TestMasterMetricsSourceImpl.java hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/master/metrics/TestMasterMetricsSourceImpl.java
deleted file mode 100644
index fe384d7..0000000
--- hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/master/metrics/TestMasterMetricsSourceImpl.java
+++ /dev/null
@@ -1,41 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.master.metrics;
-
-import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
-import org.junit.Test;
-
-import static org.junit.Assert.assertSame;
-import static org.junit.Assert.assertTrue;
-
-/**
- * Test for MasterMetricsSourceImpl
- */
-public class TestMasterMetricsSourceImpl {
-
- @Test
- public void testGetInstance() throws Exception {
- MasterMetricsSourceFactory masterMetricsSourceFactory = CompatibilitySingletonFactory
- .getInstance(MasterMetricsSourceFactory.class);
- MasterMetricsSource masterMetricsSource = masterMetricsSourceFactory.create(null);
- assertTrue(masterMetricsSource instanceof MasterMetricsSourceImpl);
- assertSame(masterMetricsSourceFactory, CompatibilitySingletonFactory.getInstance(MasterMetricsSourceFactory.class));
- }
-
-}
diff --git hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/metrics/TestBaseMetricsSourceImpl.java hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/metrics/TestBaseMetricsSourceImpl.java
index f334702..a8acc5f 100644
--- hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/metrics/TestBaseMetricsSourceImpl.java
+++ hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/metrics/TestBaseMetricsSourceImpl.java
@@ -27,15 +27,15 @@ import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;
/**
- * Test of default BaseMetricsSource for hadoop 2
+ * Test of default BaseSource for hadoop 2
*/
public class TestBaseMetricsSourceImpl {
- private static BaseMetricsSourceImpl bmsi;
+ private static BaseSourceImpl bmsi;
@BeforeClass
public static void setUp() throws Exception {
- bmsi = new BaseMetricsSourceImpl("TestName", "test description", "testcontext", "TestContext");
+ bmsi = new BaseSourceImpl("TestName", "test description", "testcontext", "TestContext");
}
@Test
@@ -75,16 +75,10 @@ public class TestBaseMetricsSourceImpl {
}
@Test
- public void testRemoveGauge() throws Exception {
+ public void testRemoveMetric() throws Exception {
bmsi.setGauge("testrmgauge", 100);
- bmsi.removeGauge("testrmgauge");
+ bmsi.removeMetric("testrmgauge");
assertNull(bmsi.metricsRegistry.get("testrmgauge"));
}
- @Test
- public void testRemoveCounter() throws Exception {
- bmsi.incCounters("testrmcounter", 100);
- bmsi.removeCounter("testrmcounter");
- assertNull(bmsi.metricsRegistry.get("testrmcounter"));
- }
}
diff --git hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/regionserver/RegionServerMetricsSourceImplTest.java hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/regionserver/RegionServerMetricsSourceImplTest.java
new file mode 100644
index 0000000..f2ca1d4
--- /dev/null
+++ hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/regionserver/RegionServerMetricsSourceImplTest.java
@@ -0,0 +1,53 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.regionserver;
+
+import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
+import org.apache.hadoop.hbase.regionserver.MetricsRegionServerSource;
+import org.apache.hadoop.hbase.regionserver.MetricsRegionServerSourceFactory;
+import org.apache.hadoop.hbase.regionserver.MetricsRegionServerSourceImpl;
+import org.junit.Test;
+
+import static org.junit.Assert.assertSame;
+import static org.junit.Assert.assertTrue;
+
+/**
+ * Test for MetricsRegionServerSourceImpl
+ */
+public class RegionServerMetricsSourceImplTest {
+
+ @Test
+ public void testGetInstance() throws Exception {
+ MetricsRegionServerSourceFactory metricsRegionServerSourceFactory =
+ CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class);
+ MetricsRegionServerSource generalSource =
+ metricsRegionServerSourceFactory.createGeneral(null);
+ assertTrue(generalSource instanceof MetricsRegionServerSourceImpl);
+ assertSame(metricsRegionServerSourceFactory,
+ CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class));
+ }
+
+
+ @Test(expected = RuntimeException.class)
+ public void testNoGetRegionServerMetricsSourceImpl() throws Exception {
+ // This should throw an exception because MetricsRegionServerSourceImpl should only
+ // be created by a factory.
+ CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceImpl.class);
+ }
+}
diff --git hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationMetricsSourceImpl.java hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationMetricsSourceImpl.java
new file mode 100644
index 0000000..c2c379a
--- /dev/null
+++ hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationMetricsSourceImpl.java
@@ -0,0 +1,37 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.replication.regionserver;
+
+import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
+import org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSource;
+import org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSourceImpl;
+import org.junit.Test;
+
+import static org.junit.Assert.assertTrue;
+
+/** Test for MetricsReplicationSourceImpl */
+public class TestReplicationMetricsSourceImpl {
+
+ @Test
+ public void testGetInstance() throws Exception {
+ MetricsReplicationSource rms = CompatibilitySingletonFactory
+ .getInstance(MetricsReplicationSource.class);
+ assertTrue(rms instanceof MetricsReplicationSourceImpl);
+ }
+}
diff --git hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/metrics/TestReplicationMetricsSourceImpl.java hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/metrics/TestReplicationMetricsSourceImpl.java
deleted file mode 100644
index 04248e0..0000000
--- hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/metrics/TestReplicationMetricsSourceImpl.java
+++ /dev/null
@@ -1,35 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.replication.regionserver.metrics;
-
-import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
-import org.junit.Test;
-
-import static org.junit.Assert.assertTrue;
-
-/** Test for ReplicationMetricsSourceImpl */
-public class TestReplicationMetricsSourceImpl {
-
- @Test
- public void testGetInstance() throws Exception {
- ReplicationMetricsSource rms = CompatibilitySingletonFactory
- .getInstance(ReplicationMetricsSource.class);
- assertTrue(rms instanceof ReplicationMetricsSourceImpl);
- }
-}
diff --git hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/rest/TestRESTMetricsSourceImpl.java hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/rest/TestRESTMetricsSourceImpl.java
new file mode 100644
index 0000000..d7719e1
--- /dev/null
+++ hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/rest/TestRESTMetricsSourceImpl.java
@@ -0,0 +1,40 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest;
+
+import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
+import org.apache.hadoop.hbase.rest.MetricsRESTSource;
+import org.apache.hadoop.hbase.rest.MetricsRESTSourceImpl;
+import org.junit.Test;
+
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+
+/**
+ * Test for the Hadoop 2 version of MetricsRESTSource
+ */
+public class TestRESTMetricsSourceImpl {
+
+ @Test
+ public void ensureCompatRegistered() throws Exception {
+ assertNotNull(CompatibilitySingletonFactory.getInstance(MetricsRESTSource.class));
+ assertTrue(CompatibilitySingletonFactory.getInstance(MetricsRESTSource.class) instanceof MetricsRESTSourceImpl);
+ }
+
+}
diff --git hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/rest/metrics/TestRESTMetricsSourceImpl.java hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/rest/metrics/TestRESTMetricsSourceImpl.java
deleted file mode 100644
index cc9c82d..0000000
--- hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/rest/metrics/TestRESTMetricsSourceImpl.java
+++ /dev/null
@@ -1,38 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.rest.metrics;
-
-import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
-import org.junit.Test;
-
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-
-/**
- * Test for hadoop 2's version of RESTMetricsSource
- */
-public class TestRESTMetricsSourceImpl {
-
- @Test
- public void ensureCompatRegistered() throws Exception {
- assertNotNull(CompatibilitySingletonFactory.getInstance(RESTMetricsSource.class));
- assertTrue(CompatibilitySingletonFactory.getInstance(RESTMetricsSource.class) instanceof RESTMetricsSourceImpl);
- }
-
-}
diff --git hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/test/MetricsAssertHelperImpl.java hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/test/MetricsAssertHelperImpl.java
index b8b06ab..29c74de 100644
--- hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/test/MetricsAssertHelperImpl.java
+++ hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/test/MetricsAssertHelperImpl.java
@@ -18,8 +18,8 @@
package org.apache.hadoop.hbase.test;
-import org.apache.hadoop.hbase.metrics.BaseMetricsSource;
-import org.apache.hadoop.hbase.metrics.BaseMetricsSourceImpl;
+import org.apache.hadoop.hbase.metrics.BaseSource;
+import org.apache.hadoop.hbase.metrics.BaseSourceImpl;
import org.apache.hadoop.metrics2.AbstractMetric;
import org.apache.hadoop.metrics2.MetricsCollector;
import org.apache.hadoop.metrics2.MetricsInfo;
@@ -129,68 +129,68 @@ public class MetricsAssertHelperImpl implements MetricsAssertHelper {
}
@Override
- public void assertTag(String name, String expected, BaseMetricsSource source) {
+ public void assertTag(String name, String expected, BaseSource source) {
getMetrics(source);
String cName = canonicalizeMetricName(name);
assertEquals("Tags should be equal", expected, tags.get(cName));
}
@Override
- public void assertGauge(String name, long expected, BaseMetricsSource source) {
+ public void assertGauge(String name, long expected, BaseSource source) {
long found = getGaugeLong(name, source);
assertEquals("Metrics Should be equal", (long) Long.valueOf(expected), found);
}
@Override
- public void assertGaugeGt(String name, long expected, BaseMetricsSource source) {
+ public void assertGaugeGt(String name, long expected, BaseSource source) {
double found = getGaugeDouble(name, source);
assertTrue(name + " (" + found + ") should be greater than " + expected, found > expected);
}
@Override
- public void assertGaugeLt(String name, long expected, BaseMetricsSource source) {
+ public void assertGaugeLt(String name, long expected, BaseSource source) {
double found = getGaugeDouble(name, source);
assertTrue(name + "(" + found + ") should be less than " + expected, found < expected);
}
@Override
- public void assertGauge(String name, double expected, BaseMetricsSource source) {
+ public void assertGauge(String name, double expected, BaseSource source) {
double found = getGaugeDouble(name, source);
- assertEquals("Metrics Should be equal", (double) Double.valueOf(expected), found);
+ assertEquals("Metrics Should be equal", (double) Double.valueOf(expected), found, 0.01);
}
@Override
- public void assertGaugeGt(String name, double expected, BaseMetricsSource source) {
+ public void assertGaugeGt(String name, double expected, BaseSource source) {
double found = getGaugeDouble(name, source);
assertTrue(name + "(" + found + ") should be greater than " + expected, found > expected);
}
@Override
- public void assertGaugeLt(String name, double expected, BaseMetricsSource source) {
+ public void assertGaugeLt(String name, double expected, BaseSource source) {
double found = getGaugeDouble(name, source);
assertTrue(name + "(" + found + ") should be less than " + expected, found < expected);
}
@Override
- public void assertCounter(String name, long expected, BaseMetricsSource source) {
+ public void assertCounter(String name, long expected, BaseSource source) {
long found = getCounter(name, source);
assertEquals("Metrics Counters should be equal", (long) Long.valueOf(expected), found);
}
@Override
- public void assertCounterGt(String name, long expected, BaseMetricsSource source) {
+ public void assertCounterGt(String name, long expected, BaseSource source) {
long found = getCounter(name, source);
assertTrue(name + " (" + found + ") should be greater than " + expected, found > expected);
}
@Override
- public void assertCounterLt(String name, long expected, BaseMetricsSource source) {
+ public void assertCounterLt(String name, long expected, BaseSource source) {
long found = getCounter(name, source);
assertTrue(name + "(" + found + ") should be less than " + expected, found < expected);
}
@Override
- public long getCounter(String name, BaseMetricsSource source) {
+ public long getCounter(String name, BaseSource source) {
getMetrics(source);
String cName = canonicalizeMetricName(name);
assertNotNull(counters.get(cName));
@@ -198,7 +198,7 @@ public class MetricsAssertHelperImpl implements MetricsAssertHelper {
}
@Override
- public double getGaugeDouble(String name, BaseMetricsSource source) {
+ public double getGaugeDouble(String name, BaseSource source) {
getMetrics(source);
String cName = canonicalizeMetricName(name);
assertNotNull(gauges.get(cName));
@@ -206,7 +206,7 @@ public class MetricsAssertHelperImpl implements MetricsAssertHelper {
}
@Override
- public long getGaugeLong(String name, BaseMetricsSource source) {
+ public long getGaugeLong(String name, BaseSource source) {
getMetrics(source);
String cName = canonicalizeMetricName(name);
assertNotNull(gauges.get(cName));
@@ -220,12 +220,12 @@ public class MetricsAssertHelperImpl implements MetricsAssertHelper {
counters.clear();
}
- private void getMetrics(BaseMetricsSource source) {
+ private void getMetrics(BaseSource source) {
reset();
- if (!(source instanceof BaseMetricsSourceImpl)) {
+ if (!(source instanceof BaseSourceImpl)) {
assertTrue(false);
}
- BaseMetricsSourceImpl impl = (BaseMetricsSourceImpl) source;
+ BaseSourceImpl impl = (BaseSourceImpl) source;
impl.getMetrics(new MockMetricsBuilder(), true);
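
Every lookup in the helper above goes through canonicalizeMetricName. A hedged sketch of what such a canonicalization can look like (the real method's exact rules are not visible in this hunk):

    // Fold case and drop separators so "snapshot_num_ops" and "SnapshotNumOps"
    // resolve to the same key. Illustrative only.
    static String canonicalizeMetricName(String name) {
      return name.toLowerCase().replaceAll("[^a-z0-9]", "");
    }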
diff --git hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftServerMetricsSourceFactoryImpl.java hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftServerMetricsSourceFactoryImpl.java
new file mode 100644
index 0000000..1f5403c
--- /dev/null
+++ hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftServerMetricsSourceFactoryImpl.java
@@ -0,0 +1,55 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.thrift;
+
+import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
+import org.apache.hadoop.hbase.thrift.MetricsThriftServerSourceFactory;
+import org.apache.hadoop.hbase.thrift.MetricsThriftServerSourceFactoryImpl;
+import org.junit.Test;
+
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertSame;
+import static org.junit.Assert.assertTrue;
+
+/**
+ * Test for the Hadoop 2 version of MetricsThriftServerSourceFactory
+ */
+public class TestThriftServerMetricsSourceFactoryImpl {
+
+ @Test
+  public void testCompatibilityRegistered() throws Exception {
+ assertNotNull(CompatibilitySingletonFactory.getInstance(MetricsThriftServerSourceFactory.class));
+ assertTrue(CompatibilitySingletonFactory.getInstance(MetricsThriftServerSourceFactory.class) instanceof MetricsThriftServerSourceFactoryImpl);
+ }
+
+ @Test
+ public void testCreateThriftOneSource() throws Exception {
+    // Make sure that the factory gives back a singleton.
+ assertSame(new MetricsThriftServerSourceFactoryImpl().createThriftOneSource(),
+ new MetricsThriftServerSourceFactoryImpl().createThriftOneSource());
+
+ }
+
+ @Test
+ public void testCreateThriftTwoSource() throws Exception {
+    // Make sure that the factory gives back a singleton.
+ assertSame(new MetricsThriftServerSourceFactoryImpl().createThriftTwoSource(),
+ new MetricsThriftServerSourceFactoryImpl().createThriftTwoSource());
+ }
+}
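
The two assertSame checks only pass if freshly constructed factories share state. One way to get that behavior, offered here as an assumption about the implementation rather than a copy of it, is to park the sources in a static enum-backed holder:

    public class ThriftSourceFactorySketch {

      // Single-element enum used as a static holder shared by every factory instance.
      private enum Storage {
        INSTANCE;
        Object thriftOne;
        Object thriftTwo;
      }

      public Object createThriftOneSource() {
        synchronized (Storage.INSTANCE) {
          if (Storage.INSTANCE.thriftOne == null) {
            Storage.INSTANCE.thriftOne = new Object(); // the real code would build a metrics source
          }
          return Storage.INSTANCE.thriftOne;
        }
      }

      public Object createThriftTwoSource() {
        synchronized (Storage.INSTANCE) {
          if (Storage.INSTANCE.thriftTwo == null) {
            Storage.INSTANCE.thriftTwo = new Object();
          }
          return Storage.INSTANCE.thriftTwo;
        }
      }
    }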
diff --git hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/thrift/metrics/TestThriftServerMetricsSourceFactoryImpl.java hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/thrift/metrics/TestThriftServerMetricsSourceFactoryImpl.java
deleted file mode 100644
index c66c36d..0000000
--- hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/thrift/metrics/TestThriftServerMetricsSourceFactoryImpl.java
+++ /dev/null
@@ -1,53 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.thrift.metrics;
-
-import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
-import org.junit.Test;
-
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertSame;
-import static org.junit.Assert.assertTrue;
-
-/**
- * Test for hadoop 2's version of ThriftServerMetricsSourceFactory
- */
-public class TestThriftServerMetricsSourceFactoryImpl {
-
- @Test
- public void testCompatabilityRegistered() throws Exception {
- assertNotNull(CompatibilitySingletonFactory.getInstance(ThriftServerMetricsSourceFactory.class));
- assertTrue(CompatibilitySingletonFactory.getInstance(ThriftServerMetricsSourceFactory.class) instanceof ThriftServerMetricsSourceFactoryImpl);
- }
-
- @Test
- public void testCreateThriftOneSource() throws Exception {
- //Make sure that the factory gives back a singleton.
- assertSame(new ThriftServerMetricsSourceFactoryImpl().createThriftOneSource(),
- new ThriftServerMetricsSourceFactoryImpl().createThriftOneSource());
-
- }
-
- @Test
- public void testCreateThriftTwoSource() throws Exception {
- //Make sure that the factory gives back a singleton.
- assertSame(new ThriftServerMetricsSourceFactoryImpl().createThriftTwoSource(),
- new ThriftServerMetricsSourceFactoryImpl().createThriftTwoSource());
- }
-}
diff --git hbase-hadoop2-compat/src/test/resources/hadoop-metrics2.properties hbase-hadoop2-compat/src/test/resources/hadoop-metrics2.properties
new file mode 100644
index 0000000..e69de29
diff --git hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.jamon hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.jamon
index ee66fdd..df20f3c 100644
--- hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.jamon
+++ hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.jamon
@@ -24,7 +24,6 @@ String format = "html";
<%import>
java.util.*;
org.apache.hadoop.hbase.regionserver.HRegionServer;
-org.apache.hadoop.hbase.regionserver.metrics.RegionServerMetrics;
org.apache.hadoop.hbase.util.Bytes;
org.apache.hadoop.hbase.HRegionInfo;
org.apache.hadoop.hbase.ServerName;
@@ -38,7 +37,6 @@ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLoad;
<%java return; %>
</%if>
<%java>
- RegionServerMetrics metrics = regionServer.getMetrics();
ServerInfo serverInfo = ProtobufUtil.getServerInfo(regionServer);
ServerName serverName = ProtobufUtil.toServerName(serverInfo.getServerName());
List<HRegionInfo> onlineRegions = ProtobufUtil.getOnlineRegions(regionServer);
@@ -98,7 +96,7 @@ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLoad;
Server Metrics
- <& ServerMetricsTmpl; metrics = metrics; &>
+ <& ServerMetricsTmpl; mWrap = regionServer.getMetrics().getRegionServerWrapper(); &>
<& ../common/TaskMonitorTmpl; filter = filter &>
diff --git hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RegionListTmpl.jamon hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RegionListTmpl.jamon
index ac0fe6f..9977939 100644
--- hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RegionListTmpl.jamon
+++ hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RegionListTmpl.jamon
@@ -23,7 +23,6 @@
<%import>
java.util.*;
org.apache.hadoop.hbase.regionserver.HRegionServer;
- org.apache.hadoop.hbase.regionserver.metrics.RegionServerMetrics;
org.apache.hadoop.hbase.util.Bytes;
org.apache.hadoop.hbase.HRegionInfo;
org.apache.hadoop.hbase.ServerName;
diff --git hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/ServerMetricsTmpl.jamon hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/ServerMetricsTmpl.jamon
index 0478c15..d05aab3 100644
--- hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/ServerMetricsTmpl.jamon
+++ hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/ServerMetricsTmpl.jamon
@@ -17,12 +17,12 @@ See the License for the specific language governing permissions and
limitations under the License.
</%doc>
<%args>
- RegionServerMetrics metrics;
+MetricsRegionServerWrapper mWrap;
</%args>
<%import>
java.util.*;
org.apache.hadoop.hbase.regionserver.HRegionServer;
-org.apache.hadoop.hbase.regionserver.metrics.RegionServerMetrics;
+org.apache.hadoop.hbase.regionserver.MetricsRegionServerWrapper;
org.apache.hadoop.hbase.util.Bytes;
org.apache.hadoop.hbase.HRegionInfo;
org.apache.hadoop.hbase.ServerName;
@@ -46,32 +46,32 @@ java.lang.management.ManagementFactory;
</%def>
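
The template now renders from a MetricsRegionServerWrapper handed over by the region server instead of pulling a RegionServerMetrics bean. The wrapper is a plain getter interface over live server state; a hedged sketch of its shape (these method names are assumptions, not the patch's definitions):

    public interface RegionServerWrapperSketch {
      long getNumStores();
      long getNumStoreFiles();
      long getMemstoreSize();
      double getRequestsPerSecond();
    }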
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileReader.java hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileReader.java
index b871865..ad0105e 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileReader.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileReader.java
@@ -29,15 +29,13 @@ import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.fs.HFileSystem;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.io.hfile.HFile.FileInfo;
-import org.apache.hadoop.hbase.regionserver.metrics.SchemaConfigured;
import org.apache.hadoop.io.RawComparator;
/**
* Common functionality needed by all versions of {@link HFile} readers.
*/
@InterfaceAudience.Private
-public abstract class AbstractHFileReader extends SchemaConfigured
- implements HFile.Reader {
+public abstract class AbstractHFileReader implements HFile.Reader {
/** Filesystem-level block reader for this HFile format version. */
protected HFileBlock.FSReader fsBlockReader;
@@ -118,7 +116,6 @@ public abstract class AbstractHFileReader extends SchemaConfigured
final long fileSize,
final boolean closeIStream,
final CacheConfig cacheConf, final HFileSystem hfs) {
- super(null, path);
this.trailer = trailer;
this.compressAlgo = trailer.getCompressionCodec();
this.cacheConf = cacheConf;
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileWriter.java hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileWriter.java
index 635b407..fe57a13 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileWriter.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileWriter.java
@@ -33,7 +33,6 @@ import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.KeyValue.KeyComparator;
import org.apache.hadoop.hbase.io.hfile.HFile.FileInfo;
-import org.apache.hadoop.hbase.regionserver.metrics.SchemaConfigured;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.io.RawComparator;
@@ -43,8 +42,7 @@ import org.apache.hadoop.io.Writable;
* Common functionality needed by all versions of {@link HFile} writers.
*/
@InterfaceAudience.Private
-public abstract class AbstractHFileWriter extends SchemaConfigured
- implements HFile.Writer {
+public abstract class AbstractHFileWriter implements HFile.Writer {
/** Key previously appended. Becomes the last key in the file. */
protected byte[] lastKeyBuffer = null;
@@ -115,7 +113,6 @@ public abstract class AbstractHFileWriter extends SchemaConfigured
Compression.Algorithm compressAlgo,
HFileDataBlockEncoder dataBlockEncoder,
KeyComparator comparator) {
- super(null, path);
this.outputStream = outputStream;
this.path = path;
this.name = path != null ? path.getName() : outputStream.toString();
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/Cacheable.java hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/Cacheable.java
index c6b12eb..b205106 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/Cacheable.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/Cacheable.java
@@ -23,7 +23,6 @@ import java.nio.ByteBuffer;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hbase.io.HeapSize;
-import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics;
/**
* Cacheable is an interface that allows for an object to be cached. If using an
@@ -57,14 +56,4 @@ public interface Cacheable extends HeapSize {
*/
public CacheableDeserializer<Cacheable> getDeserializer();
- /**
- * @return the block type of this cached HFile block
- */
- public BlockType getBlockType();
-
- /**
- * @return the metrics object identified by table and column family
- */
- public SchemaMetrics getSchemaMetrics();
-
}
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
index 598e3ba..e63417f 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
@@ -48,8 +48,6 @@ import org.apache.hadoop.hbase.KeyValue.KeyComparator;
import org.apache.hadoop.hbase.fs.HFileSystem;
import org.apache.hadoop.hbase.io.HbaseMapWritable;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
-import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics;
-import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics.SchemaAware;
import org.apache.hadoop.hbase.util.BloomFilterWriter;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.ChecksumType;
@@ -283,8 +281,6 @@ public class HFile {
/** @return the path to this {@link HFile} */
Path getPath();
- String getColumnFamilyName();
-
void appendMetaBlock(String bloomFilterMetaKey, Writable metaWriter);
/**
@@ -430,7 +426,6 @@ public class HFile {
*/
public static final WriterFactory getWriterFactory(Configuration conf,
CacheConfig cacheConf) {
- SchemaMetrics.configureGlobally(conf);
int version = getFormatVersion(conf);
switch (version) {
case 1:
@@ -452,8 +447,7 @@ public class HFile {
}
/** An interface used by clients to open and iterate an {@link HFile}. */
- public interface Reader extends Closeable, CachingBlockReader,
- SchemaAware {
+ public interface Reader extends Closeable, CachingBlockReader {
/**
* Returns this reader's "name". Usually the last component of the path.
* Needs to be constant as the file is being moved to support caching on
@@ -461,8 +455,6 @@ public class HFile {
*/
String getName();
- String getColumnFamilyName();
-
RawComparator<byte[]> getComparator();
HFileScanner getScanner(boolean cacheBlocks,
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
index c93c8d5..d3a9b0e 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
@@ -41,7 +41,6 @@ import org.apache.hadoop.hbase.io.encoding.HFileBlockDefaultEncodingContext;
import org.apache.hadoop.hbase.io.encoding.HFileBlockEncodingContext;
import org.apache.hadoop.hbase.io.hfile.Compression.Algorithm;
import org.apache.hadoop.hbase.regionserver.MemStore;
-import org.apache.hadoop.hbase.regionserver.metrics.SchemaConfigured;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.ChecksumType;
import org.apache.hadoop.hbase.util.ClassSize;
@@ -84,7 +83,7 @@ import com.google.common.base.Preconditions;
* except that the data section is always uncompressed in the cache.
*/
@InterfaceAudience.Private
-public class HFileBlock extends SchemaConfigured implements Cacheable {
+public class HFileBlock implements Cacheable {
/** Minor versions starting with this number have hbase checksums */
static final int MINOR_VERSION_WITH_CHECKSUM = 1;
@@ -538,8 +537,6 @@ public class HFileBlock extends SchemaConfigured implements Cacheable {
@Override
public long heapSize() {
long size = ClassSize.align(
- // Base class size, including object overhead.
- SCHEMA_CONFIGURED_UNALIGNED_HEAP_SIZE +
// Block type and byte buffer references
2 * ClassSize.REFERENCE +
// On-disk size, uncompressed size, and next block's on-disk size
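
With SchemaConfigured gone, heapSize() above no longer folds in the old base-class footprint and counts only the block's own header, references, and primitives. A toy version of that arithmetic, assuming the usual 64-bit layout constants that HBase's ClassSize encodes:

    public class HeapSizeSketch {
      static final int OBJECT = 16;    // object header, assumed 64-bit JVM
      static final int REFERENCE = 8;  // one reference

      static long align(long size) {
        return (size + 7) & ~7L;       // round up to an 8-byte boundary
      }

      public static void main(String[] args) {
        long size = align(OBJECT
            + 2 * REFERENCE  // block type and byte buffer references
            + 3 * 4          // three int sizes tracked by the block
            + 8);            // one long offset
        System.out.println("approx per-block overhead: " + size + " bytes");
      }
    }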
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java
index 7fbc06d..c7cf874 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java
@@ -41,7 +41,6 @@ import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.io.HeapSize;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.io.hfile.HFile.CachingBlockReader;
-import org.apache.hadoop.hbase.regionserver.metrics.SchemaConfigured;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.ClassSize;
import org.apache.hadoop.hbase.util.CompoundBloomFilterWriter;
@@ -719,8 +718,7 @@ public class HFileBlockIndex {
* index. However, in most practical cases we will only have leaf-level
* blocks and the root index, or just the root index.
*/
- public static class BlockIndexWriter extends SchemaConfigured
- implements InlineBlockWriter {
+ public static class BlockIndexWriter implements InlineBlockWriter {
/**
* While the index is being written, this represents the current block
* index referencing all leaf blocks, with one exception. If the file is
@@ -954,7 +952,6 @@ public class HFileBlockIndex {
if (blockCache != null) {
HFileBlock blockForCaching = blockWriter.getBlockForCaching();
- passSchemaMetricsTo(blockForCaching);
blockCache.cacheBlock(new BlockCacheKey(nameForCaching,
beginOffset, DataBlockEncoding.NONE,
blockForCaching.getBlockType()), blockForCaching);
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileDataBlockEncoderImpl.java hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileDataBlockEncoderImpl.java
index 013218a..603f1c3 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileDataBlockEncoderImpl.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileDataBlockEncoderImpl.java
@@ -243,7 +243,6 @@ public class HFileDataBlockEncoderImpl implements HFileDataBlockEncoder {
includesMemstoreTS, block.getMinorVersion(),
block.getBytesPerChecksum(), block.getChecksumType(),
block.getOnDiskDataSizeWithHeader());
- block.passSchemaMetricsTo(encodedBlock);
return encodedBlock;
}
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java
index e1882b3..be0fb17 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java
@@ -43,7 +43,6 @@ import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.io.hfile.HFile.FileInfo;
import org.apache.hadoop.hbase.regionserver.TimeRangeTracker;
-import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics;
import org.apache.hadoop.hbase.util.BloomFilter;
import org.apache.hadoop.hbase.util.BloomFilterFactory;
import org.apache.hadoop.hbase.util.ByteBloomFilter;
@@ -174,7 +173,6 @@ public class HFilePrettyPrinter {
conf.get(org.apache.hadoop.hbase.HConstants.HBASE_DIR));
conf.set("fs.default.name",
conf.get(org.apache.hadoop.hbase.HConstants.HBASE_DIR));
- SchemaMetrics.configureGlobally(conf);
try {
if (!parseOptions(args))
return 1;
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV1.java hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV1.java
index 56339da..436d0c4 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV1.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV1.java
@@ -34,7 +34,6 @@ import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.io.hfile.BlockType.BlockCategory;
import org.apache.hadoop.hbase.io.hfile.HFile.FileInfo;
import org.apache.hadoop.hbase.io.hfile.HFile.Writer;
-import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.RawComparator;
@@ -235,8 +234,6 @@ public class HFileReaderV1 extends AbstractHFileReader {
cacheConf.shouldCacheBlockOnRead(effectiveCategory));
if (cachedBlock != null) {
cacheHits.incrementAndGet();
- getSchemaMetrics().updateOnCacheHit(effectiveCategory,
- SchemaMetrics.NO_COMPACTION);
return cachedBlock.getBufferWithoutHeader();
}
// Cache Miss, please load.
@@ -245,13 +242,10 @@ public class HFileReaderV1 extends AbstractHFileReader {
HFileBlock hfileBlock = fsBlockReader.readBlockData(offset,
nextOffset - offset, metaBlockIndexReader.getRootBlockDataSize(block),
true);
- passSchemaMetricsTo(hfileBlock);
hfileBlock.expectType(BlockType.META);
final long delta = System.nanoTime() - startTimeNs;
HFile.offerReadLatency(delta, true);
- getSchemaMetrics().updateOnCacheMiss(effectiveCategory,
- SchemaMetrics.NO_COMPACTION, delta);
// Cache the block
if (cacheBlock && cacheConf.shouldCacheBlockOnRead(effectiveCategory)) {
@@ -300,8 +294,6 @@ public class HFileReaderV1 extends AbstractHFileReader {
cacheConf.shouldCacheDataOnRead());
if (cachedBlock != null) {
cacheHits.incrementAndGet();
- getSchemaMetrics().updateOnCacheHit(
- cachedBlock.getBlockType().getCategory(), isCompaction);
return cachedBlock.getBufferWithoutHeader();
}
// Carry on, please load.
@@ -323,13 +315,10 @@ public class HFileReaderV1 extends AbstractHFileReader {
HFileBlock hfileBlock = fsBlockReader.readBlockData(offset, nextOffset
- offset, dataBlockIndexReader.getRootBlockDataSize(block), pread);
- passSchemaMetricsTo(hfileBlock);
hfileBlock.expectType(BlockType.DATA);
final long delta = System.nanoTime() - startTimeNs;
HFile.offerReadLatency(delta, pread);
- getSchemaMetrics().updateOnCacheMiss(BlockCategory.DATA, isCompaction,
- delta);
// Cache the block
if (cacheBlock && cacheConf.shouldCacheBlockOnRead(
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java
index e252f38..71e4d09 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java
@@ -226,7 +226,6 @@ public class HFileReaderV2 extends AbstractHFileReader {
// Return a distinct 'shallow copy' of the block,
// so pos does not get messed by the scanner
cacheHits.incrementAndGet();
- getSchemaMetrics().updateOnCacheHit(BlockCategory.META, false);
return cachedBlock.getBufferWithoutHeader();
}
// Cache Miss, please load.
@@ -234,11 +233,9 @@ public class HFileReaderV2 extends AbstractHFileReader {
HFileBlock metaBlock = fsBlockReader.readBlockData(metaBlockOffset,
blockSize, -1, true);
- passSchemaMetricsTo(metaBlock);
final long delta = System.nanoTime() - startTimeNs;
HFile.offerReadLatency(delta, true);
- getSchemaMetrics().updateOnCacheMiss(BlockCategory.META, false, delta);
// Cache the block
if (cacheBlock) {
@@ -302,7 +299,6 @@ public class HFileReaderV2 extends AbstractHFileReader {
cachedBlock.getBlockType().getCategory();
cacheHits.incrementAndGet();
- getSchemaMetrics().updateOnCacheHit(blockCategory, isCompaction);
if (cachedBlock.getBlockType() == BlockType.DATA) {
HFile.dataBlockReadCnt.incrementAndGet();
@@ -331,12 +327,10 @@ public class HFileReaderV2 extends AbstractHFileReader {
hfileBlock = dataBlockEncoder.diskToCacheFormat(hfileBlock,
isCompaction);
validateBlockType(hfileBlock, expectedBlockType);
- passSchemaMetricsTo(hfileBlock);
BlockCategory blockCategory = hfileBlock.getBlockType().getCategory();
final long delta = System.nanoTime() - startTimeNs;
HFile.offerReadLatency(delta, pread);
- getSchemaMetrics().updateOnCacheMiss(blockCategory, isCompaction, delta);
// Cache the block if necessary
if (cacheBlock && cacheConf.shouldCacheBlockOnRead(
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV1.java hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV1.java
index 3861c00..a4956a6 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV1.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV1.java
@@ -40,7 +40,6 @@ import org.apache.hadoop.hbase.io.hfile.Compression.Algorithm;
import org.apache.hadoop.hbase.io.hfile.HFile.FileInfo;
import org.apache.hadoop.hbase.io.hfile.HFile.Writer;
import org.apache.hadoop.hbase.regionserver.MemStore;
-import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics;
import org.apache.hadoop.hbase.util.ChecksumType;
import org.apache.hadoop.hbase.util.BloomFilterWriter;
import org.apache.hadoop.hbase.util.Bytes;
@@ -108,7 +107,6 @@ public class HFileWriterV1 extends AbstractHFileWriter {
final KeyComparator comparator) throws IOException {
super(cacheConf, ostream == null ? createOutputStream(conf, fs, path) : ostream, path,
blockSize, compress, blockEncoder, comparator);
- SchemaMetrics.configureGlobally(conf);
}
/**
@@ -157,7 +155,6 @@ public class HFileWriterV1 extends AbstractHFileWriter {
HFileBlock.HEADER_SIZE_NO_CHECKSUM); // onDiskDataSizeWithHeader
block = blockEncoder.diskToCacheFormat(block, false);
- passSchemaMetricsTo(block);
cacheConf.getBlockCache().cacheBlock(
new BlockCacheKey(name, blockBegin, DataBlockEncoding.NONE,
block.getBlockType()), block);
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV2.java hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV2.java
index 92c3a62..63dfc9f 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV2.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV2.java
@@ -36,7 +36,6 @@ import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValue.KeyComparator;
import org.apache.hadoop.hbase.io.hfile.HFile.Writer;
import org.apache.hadoop.hbase.io.hfile.HFileBlock.BlockWritable;
-import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics;
import org.apache.hadoop.hbase.util.ChecksumType;
import org.apache.hadoop.hbase.util.BloomFilterWriter;
import org.apache.hadoop.hbase.util.Bytes;
@@ -113,7 +112,6 @@ public class HFileWriterV2 extends AbstractHFileWriter {
super(cacheConf,
ostream == null ? createOutputStream(conf, fs, path) : ostream,
path, blockSize, compressAlgo, blockEncoder, comparator);
- SchemaMetrics.configureGlobally(conf);
this.checksumType = checksumType;
this.bytesPerChecksum = bytesPerChecksum;
finishInit(conf);
@@ -140,16 +138,6 @@ public class HFileWriterV2 extends AbstractHFileWriter {
// Meta data block index writer
metaBlockIndexWriter = new HFileBlockIndex.BlockIndexWriter();
LOG.debug("Initialized with " + cacheConf);
-
- if (isSchemaConfigured()) {
- schemaConfigurationChanged();
- }
- }
-
- @Override
- protected void schemaConfigurationChanged() {
- passSchemaMetricsTo(dataBlockIndexWriter);
- passSchemaMetricsTo(metaBlockIndexWriter);
}
/**
@@ -226,7 +214,6 @@ public class HFileWriterV2 extends AbstractHFileWriter {
final boolean isCompaction = false;
HFileBlock cacheFormatBlock = blockEncoder.diskToCacheFormat(
fsBlockWriter.getBlockForCaching(), isCompaction);
- passSchemaMetricsTo(cacheFormatBlock);
cacheConf.getBlockCache().cacheBlock(
new BlockCacheKey(name, offset, blockEncoder.getEncodingInCache(),
cacheFormatBlock.getBlockType()), cacheFormatBlock);
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
index 8a6929b..74b6212 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
@@ -44,7 +44,6 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.io.HeapSize;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
-import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.ClassSize;
import org.apache.hadoop.hbase.util.FSUtils;
@@ -326,12 +325,6 @@ public class LruBlockCache implements BlockCache, HeapSize {
if (evict) {
heapsize *= -1;
}
- Cacheable cachedBlock = cb.getBuffer();
- SchemaMetrics schemaMetrics = cachedBlock.getSchemaMetrics();
- if (schemaMetrics != null) {
- schemaMetrics.updateOnCachePutOrEvict(
- cachedBlock.getBlockType().getCategory(), heapsize, evict);
- }
return size.addAndGet(heapsize);
}
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
index 38dec34..8d60881 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
@@ -65,7 +65,6 @@ import org.apache.hadoop.hbase.master.handler.DisableTableHandler;
import org.apache.hadoop.hbase.master.handler.EnableTableHandler;
import org.apache.hadoop.hbase.master.handler.OpenedRegionHandler;
import org.apache.hadoop.hbase.master.handler.SplitRegionHandler;
-import org.apache.hadoop.hbase.master.metrics.MasterMetrics;
import org.apache.hadoop.hbase.regionserver.RegionAlreadyInTransitionException;
import org.apache.hadoop.hbase.regionserver.RegionOpeningState;
import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
@@ -152,7 +151,7 @@ public class AssignmentManager extends ZooKeeperListener {
EventType.RS_ZK_REGION_FAILED_OPEN, EventType.RS_ZK_REGION_CLOSED });
// metrics instance to send metrics for RITs
- MasterMetrics masterMetrics;
+ MetricsMaster metricsMaster;
private final RegionStates regionStates;
@@ -176,7 +175,7 @@ public class AssignmentManager extends ZooKeeperListener {
*/
public AssignmentManager(Server server, ServerManager serverManager,
CatalogTracker catalogTracker, final LoadBalancer balancer,
- final ExecutorService service, MasterMetrics metrics) throws KeeperException, IOException {
+ final ExecutorService service, MetricsMaster metricsMaster) throws KeeperException, IOException {
super(server.getZooKeeper());
this.server = server;
this.serverManager = serverManager;
@@ -200,7 +199,7 @@ public class AssignmentManager extends ZooKeeperListener {
int maxThreads = conf.getInt("hbase.assignment.threads.max", 30);
this.threadPoolExecutorService = Threads.getBoundedCachedThreadPool(
maxThreads, 60L, TimeUnit.SECONDS, Threads.newDaemonThreadFactory("hbase-am"));
- this.masterMetrics = metrics;// can be null only with tests.
+    this.metricsMaster = metricsMaster; // Can be null only in tests.
this.regionStates = new RegionStates(server, serverManager);
int workers = conf.getInt("hbase.assignment.zkevent.workers", 5);
@@ -2335,10 +2334,10 @@ public class AssignmentManager extends ZooKeeperListener {
oldestRITTime = ritTime;
}
}
- if (this.masterMetrics != null) {
- this.masterMetrics.updateRITOldestAge(oldestRITTime);
- this.masterMetrics.updateRITCount(totalRITs);
- this.masterMetrics.updateRITCountOverThreshold(totalRITsOverThreshold);
+ if (this.metricsMaster != null) {
+ this.metricsMaster.updateRITOldestAge(oldestRITTime);
+ this.metricsMaster.updateRITCount(totalRITs);
+ this.metricsMaster.updateRITCountOverThreshold(totalRITsOverThreshold);
}
}
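
The three updateRIT* calls above publish numbers computed by a scan over the regions currently in transition. A self-contained sketch of that aggregation, with RegionState reduced to its transition stamp (an assumed reconstruction, not the method body):

    import java.util.Collection;

    public class RitStatsSketch {
      /** Stand-in for RegionState: only the time the region entered transition. */
      public static class StampedState {
        final long stamp;
        public StampedState(long stamp) { this.stamp = stamp; }
      }

      /** Returns {totalRITs, ritsOverThreshold, oldestAgeMs}. */
      public static long[] aggregate(Collection<StampedState> states, long thresholdMs) {
        long now = System.currentTimeMillis();
        long total = 0, over = 0, oldest = 0;
        for (StampedState s : states) {
          total++;
          long age = now - s.stamp;
          if (age > thresholdMs) over++;
          if (age > oldest) oldest = age;
        }
        return new long[] { total, over, oldest };
      }
    }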
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index fcad988..4d4447a 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -96,8 +96,6 @@ import org.apache.hadoop.hbase.master.handler.TableAddFamilyHandler;
import org.apache.hadoop.hbase.master.handler.TableDeleteFamilyHandler;
import org.apache.hadoop.hbase.master.handler.TableEventHandler;
import org.apache.hadoop.hbase.master.handler.TableModifyFamilyHandler;
-import org.apache.hadoop.hbase.master.metrics.MasterMetrics;
-import org.apache.hadoop.hbase.master.metrics.MasterMetricsWrapperImpl;
import org.apache.hadoop.hbase.monitoring.MemoryBoundedLogMessageBuffer;
import org.apache.hadoop.hbase.monitoring.MonitoredTask;
import org.apache.hadoop.hbase.monitoring.TaskMonitor;
@@ -247,7 +245,7 @@ Server {
private final InetSocketAddress isa;
// Metrics for the HMaster
- private final MasterMetrics metrics;
+ private final MetricsMaster metricsMaster;
// file system manager for the master FS operations
private MasterFileSystem fileSystemManager;
@@ -383,7 +381,7 @@ Server {
//should we check the compression codec type at master side, default true, HBASE-6370
this.masterCheckCompression = conf.getBoolean("hbase.master.check.compression", true);
- this.metrics = new MasterMetrics( new MasterMetricsWrapperImpl(this));
+ this.metricsMaster = new MetricsMaster( new MetricsMasterWrapperImpl(this));
}
/**
@@ -413,8 +411,8 @@ Server {
}
- MasterMetrics getMetrics() {
- return metrics;
+ MetricsMaster getMetrics() {
+ return metricsMaster;
}
/**
@@ -523,7 +521,7 @@ Server {
this.loadBalancerTracker = new LoadBalancerTracker(zooKeeper, this);
this.loadBalancerTracker.start();
this.assignmentManager = new AssignmentManager(this, serverManager,
- this.catalogTracker, this.balancer, this.executorService, this.metrics);
+ this.catalogTracker, this.balancer, this.executorService, this.metricsMaster);
zooKeeper.registerListenerFirst(assignmentManager);
this.regionServerTracker = new RegionServerTracker(zooKeeper, this,
@@ -627,7 +625,7 @@ Server {
status.setStatus("Initializing Master file system");
this.masterActiveTime = System.currentTimeMillis();
// TODO: Do this using Dependency Injection, using PicoContainer, Guice or Spring.
- this.fileSystemManager = new MasterFileSystem(this, this, metrics, masterRecovery);
+ this.fileSystemManager = new MasterFileSystem(this, this, metricsMaster, masterRecovery);
this.tableDescriptors =
new FSTableDescriptors(this.fileSystemManager.getFileSystem(),
@@ -1182,9 +1180,9 @@ Server {
try {
HBaseProtos.ServerLoad sl = request.getLoad();
this.serverManager.regionServerReport(ProtobufUtil.toServerName(request.getServer()), new ServerLoad(sl));
- if (sl != null && this.metrics != null) {
+ if (sl != null && this.metricsMaster != null) {
// Up our metrics.
- this.metrics.incrementRequests(sl.getTotalNumberOfRequests());
+ this.metricsMaster.incrementRequests(sl.getTotalNumberOfRequests());
}
} catch (IOException ioe) {
throw new ServiceException(ioe);
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
index 8a1bc46..4871413 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
@@ -44,10 +44,8 @@ import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.backup.HFileArchiver;
import org.apache.hadoop.hbase.fs.HFileSystem;
-import org.apache.hadoop.hbase.master.metrics.MasterMetrics;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.wal.HLog;
-import org.apache.hadoop.hbase.regionserver.RegionAlreadyInTransitionException;
import org.apache.hadoop.hbase.regionserver.wal.HLogSplitter;
import org.apache.hadoop.hbase.regionserver.wal.HLogUtil;
import org.apache.hadoop.hbase.regionserver.wal.OrphanHLogAfterSplitException;
@@ -69,7 +67,7 @@ public class MasterFileSystem {
// master status
Server master;
// metrics for master
- MasterMetrics metrics;
+ MetricsMaster metricsMaster;
// Persisted unique cluster ID
private ClusterId clusterId;
// Keep around for convenience.
@@ -87,12 +85,12 @@ public class MasterFileSystem {
private final MasterServices services;
public MasterFileSystem(Server master, MasterServices services,
- MasterMetrics metrics, boolean masterRecovery)
+ MetricsMaster metricsMaster, boolean masterRecovery)
throws IOException {
this.conf = master.getConfiguration();
this.master = master;
this.services = services;
- this.metrics = metrics;
+ this.metricsMaster = metricsMaster;
// Set filesystem to be that of this.rootdir else we get complaints about
// mismatched filesystems if hbase.rootdir is hdfs and fs.defaultFS is
// default localfs. Presumption is that rootdir is fully-qualified before
@@ -317,8 +315,8 @@ public class MasterFileSystem {
}
}
- if (this.metrics != null) {
- this.metrics.addSplit(splitTime, splitLogSize);
+ if (this.metricsMaster != null) {
+ this.metricsMaster.addSplit(splitTime, splitLogSize);
}
}
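
addSplit() receives a wall-clock duration and the aggregate size of the HLogs that were split, so the call site follows the usual measure-then-report pattern. An illustrative fragment (doSplit() is a hypothetical stand-in for the HLogSplitter invocation):

    long start = System.currentTimeMillis();
    long splitLogSize = doSplit();                       // hypothetical helper
    long splitTime = System.currentTimeMillis() - start;
    if (this.metricsMaster != null) {                    // metrics can be absent in tests
      this.metricsMaster.addSplit(splitTime, splitLogSize);
    }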
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsMaster.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsMaster.java
new file mode 100644
index 0000000..578bca4
--- /dev/null
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsMaster.java
@@ -0,0 +1,92 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
+import org.apache.hadoop.hbase.master.MetricsMasterSource;
+import org.apache.hadoop.hbase.master.MetricsMasterSourceFactory;
+import org.apache.hadoop.hbase.master.MetricsMasterWrapper;
+
+/**
+ * This class is for maintaining the various master statistics
+ * and publishing them through the metrics interfaces.
+ *
+ * All updates are funneled through a single {@link MetricsMasterSource}
+ * created by the {@link MetricsMasterSourceFactory}.
+ */
+@InterfaceStability.Evolving
+@InterfaceAudience.Private
+public class MetricsMaster {
+ private final Log LOG = LogFactory.getLog(this.getClass());
+ private MetricsMasterSource masterSource;
+
+ public MetricsMaster(MetricsMasterWrapper masterWrapper) {
+ masterSource = CompatibilitySingletonFactory.getInstance(MetricsMasterSourceFactory.class).create(masterWrapper);
+ }
+
+ // for unit-test usage
+ public MetricsMasterSource getMetricsSource() {
+ return masterSource;
+ }
+
+ /**
+ * Record a single instance of a split
+ * @param time time that the split took
+ * @param size length of original HLogs that were split
+ */
+ public synchronized void addSplit(long time, long size) {
+ masterSource.updateSplitTime(time);
+ masterSource.updateSplitSize(size);
+ }
+
+ /**
+ * @param inc How much to add to requests.
+ */
+ public void incrementRequests(final int inc) {
+ masterSource.incRequests(inc);
+
+ }
+
+ /**
+   * Set a new value for the number of regions in transition.
+   * @param ritCount the current number of regions in transition
+ */
+ public void updateRITCount(int ritCount) {
+ masterSource.setRIT(ritCount);
+ }
+
+ /**
+   * Update the count of RITs that have been in transition longer than the
+   * threshold defined by the property rit.metrics.threshold.time.
+   * @param ritCountOverThreshold the number of RITs over that threshold
+ */
+ public void updateRITCountOverThreshold(int ritCountOverThreshold) {
+ masterSource.setRITCountOverThreshold(ritCountOverThreshold);
+ }
+ /**
+   * Update the age reported for the oldest region in transition.
+   * @param timestamp the age, in milliseconds, of the oldest RIT
+ */
+ public void updateRITOldestAge(long timestamp) {
+ masterSource.setRITOldestAge(timestamp);
+ }
+}
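
For reference, the call sites this patch wires up elsewhere reduce to the following usage pattern (the values here are made up):

    MetricsMaster metricsMaster = new MetricsMaster(new MetricsMasterWrapperImpl(master));
    metricsMaster.incrementRequests(42);          // cluster requests since last report
    metricsMaster.addSplit(1200L, 64L << 20);     // a 64 MB log split that took 1.2 s
    metricsMaster.updateRITCount(3);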
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterWrapperImpl.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterWrapperImpl.java
new file mode 100644
index 0000000..081afc6
--- /dev/null
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterWrapperImpl.java
@@ -0,0 +1,83 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master;
+
+import org.apache.hadoop.hbase.master.HMaster;
+import org.apache.hadoop.hbase.master.MetricsMasterWrapper;
+
+/**
+ * Implementation exposing HMaster information through JMX.
+ */
+public class MetricsMasterWrapperImpl implements MetricsMasterWrapper {
+
+ private final HMaster master;
+
+ public MetricsMasterWrapperImpl(final HMaster master) {
+ this.master = master;
+ }
+
+ @Override
+ public double getAverageLoad() {
+ return master.getAverageLoad();
+ }
+
+ @Override
+ public String getClusterId() {
+ return master.getClusterId();
+ }
+
+ @Override
+ public String getZookeeperQuorum() {
+ return master.getZooKeeperWatcher().getQuorum();
+ }
+
+ @Override
+ public String[] getCoprocessors() {
+ return master.getCoprocessors();
+ }
+
+ @Override
+ public long getMasterStartTime() {
+ return master.getMasterStartTime();
+ }
+
+ @Override
+ public long getMasterActiveTime() {
+ return master.getMasterActiveTime();
+ }
+
+ @Override
+ public int getRegionServers() {
+ return this.master.getServerManager().getOnlineServers().size();
+ }
+
+ @Override
+ public int getDeadRegionServers() {
+ return master.getServerManager().getDeadServers().size();
+ }
+
+ @Override
+ public String getServerName() {
+ return master.getServerName().getServerName();
+ }
+
+ @Override
+ public boolean getIsActiveMaster() {
+ return master.isActiveMaster();
+ }
+}
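
Because the metrics source only ever sees the wrapper interface, tests can feed MetricsMaster a throwaway stub instead of a live HMaster. A hedged sketch whose override set matches the methods visible in this file (return values are arbitrary):

    public class StubMasterWrapper implements MetricsMasterWrapper {
      @Override public double getAverageLoad() { return 1.0; }
      @Override public String getClusterId() { return "test-cluster"; }
      @Override public String getZookeeperQuorum() { return "localhost:2181"; }
      @Override public String[] getCoprocessors() { return new String[0]; }
      @Override public long getMasterStartTime() { return 0L; }
      @Override public long getMasterActiveTime() { return 0L; }
      @Override public int getRegionServers() { return 1; }
      @Override public int getDeadRegionServers() { return 0; }
      @Override public String getServerName() { return "localhost,16000,0"; }
      @Override public boolean getIsActiveMaster() { return true; }
    }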
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetrics.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetrics.java
deleted file mode 100644
index 603d3e9..0000000
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetrics.java
+++ /dev/null
@@ -1,89 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.master.metrics;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
-
-/**
- * This class is for maintaining the various master statistics
- * and publishing them through the metrics interfaces.
- *
- * This class has a number of metrics variables that are publicly accessible;
- * these variables (objects) have methods to update their values.
- */
-@InterfaceStability.Evolving
-@InterfaceAudience.Private
-public class MasterMetrics {
- private final Log LOG = LogFactory.getLog(this.getClass());
- private MasterMetricsSource masterMetricsSource;
-
- public MasterMetrics(MasterMetricsWrapper masterWrapper) {
- masterMetricsSource = CompatibilitySingletonFactory.getInstance(MasterMetricsSourceFactory.class).create(masterWrapper);
- }
-
- // for unit-test usage
- public MasterMetricsSource getMetricsSource() {
- return masterMetricsSource;
- }
-
- /**
- * Record a single instance of a split
- * @param time time that the split took
- * @param size length of original HLogs that were split
- */
- public synchronized void addSplit(long time, long size) {
- masterMetricsSource.updateSplitTime(time);
- masterMetricsSource.updateSplitSize(size);
- }
-
- /**
- * @param inc How much to add to requests.
- */
- public void incrementRequests(final int inc) {
- masterMetricsSource.incRequests(inc);
-
- }
-
- /**
- * set new value for number of regions in transition.
- * @param ritCount
- */
- public void updateRITCount(int ritCount) {
- masterMetricsSource.setRIT(ritCount);
- }
-
- /**
- * update RIT count that are in this state for more than the threshold
- * as defined by the property rit.metrics.threshold.time.
- * @param ritCountOverThreshold
- */
- public void updateRITCountOverThreshold(int ritCountOverThreshold) {
- masterMetricsSource.setRITCountOverThreshold(ritCountOverThreshold);
- }
- /**
- * update the timestamp for oldest region in transition metrics.
- * @param timestamp
- */
- public void updateRITOldestAge(long timestamp) {
- masterMetricsSource.setRITOldestAge(timestamp);
- }
-}
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetricsWrapperImpl.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetricsWrapperImpl.java
deleted file mode 100644
index 3a58986..0000000
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetricsWrapperImpl.java
+++ /dev/null
@@ -1,82 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.master.metrics;
-
-import org.apache.hadoop.hbase.master.HMaster;
-
-/**
- * Impl for exposing HMaster Information through JMX
- */
-public class MasterMetricsWrapperImpl implements MasterMetricsWrapper {
-
- private final HMaster master;
-
- public MasterMetricsWrapperImpl(final HMaster master) {
- this.master = master;
- }
-
- @Override
- public double getAverageLoad() {
- return master.getAverageLoad();
- }
-
- @Override
- public String getClusterId() {
- return master.getClusterId();
- }
-
- @Override
- public String getZookeeperQuorum() {
- return master.getZooKeeperWatcher().getQuorum();
- }
-
- @Override
- public String[] getCoprocessors() {
- return master.getCoprocessors();
- }
-
- @Override
- public long getMasterStartTime() {
- return master.getMasterStartTime();
- }
-
- @Override
- public long getMasterActiveTime() {
- return master.getMasterActiveTime();
- }
-
- @Override
- public int getRegionServers() {
- return this.master.getServerManager().getOnlineServers().size();
- }
-
- @Override
- public int getDeadRegionServers() {
- return master.getServerManager().getDeadServers().size();
- }
-
- @Override
- public String getServerName() {
- return master.getServerName().getServerName();
- }
-
- @Override
- public boolean getIsActiveMaster() {
- return master.isActiveMaster();
- }
-}
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index eea848f..4de19c3 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -81,7 +81,6 @@ import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HConstants.OperationStatusCode;
import org.apache.hadoop.hbase.HDFSBlocksDistribution;
import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HServerInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.NotServingRegionException;
@@ -116,11 +115,7 @@ import org.apache.hadoop.hbase.ipc.HBaseServer;
import org.apache.hadoop.hbase.ipc.RpcCallContext;
import org.apache.hadoop.hbase.monitoring.MonitoredTask;
import org.apache.hadoop.hbase.monitoring.TaskMonitor;
-import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
-import org.apache.hadoop.hbase.regionserver.metrics.OperationMetrics;
-import org.apache.hadoop.hbase.regionserver.metrics.RegionMetricsStorage;
-import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics;
import org.apache.hadoop.hbase.regionserver.wal.HLog;
import org.apache.hadoop.hbase.regionserver.wal.HLogFactory;
import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
@@ -235,16 +230,21 @@ public class HRegion implements HeapSize { // , Writable{
// private int [] storeSize = null;
// private byte [] name = null;
- final AtomicLong memstoreSize = new AtomicLong(0);
+ public final AtomicLong memstoreSize = new AtomicLong(0);
// Debug possible data loss due to WAL off
- final AtomicLong numPutsWithoutWAL = new AtomicLong(0);
- final AtomicLong dataInMemoryWithoutWAL = new AtomicLong(0);
+ final Counter numPutsWithoutWAL = new Counter();
+ final Counter dataInMemoryWithoutWAL = new Counter();
+ // Debug why CAS operations are taking a while.
final Counter checkAndMutateChecksPassed = new Counter();
final Counter checkAndMutateChecksFailed = new Counter();
+
+ // Number of requests
final Counter readRequestsCount = new Counter();
final Counter writeRequestsCount = new Counter();
+
+ // How long operations were blocked by a memstore over its high-water mark.
final Counter updatesBlockedMs = new Counter();
/**
@@ -362,7 +362,7 @@ public class HRegion implements HeapSize { // , Writable{
public final static String REGIONINFO_FILE = ".regioninfo";
private HTableDescriptor htableDescriptor = null;
private RegionSplitPolicy splitPolicy;
- private final OperationMetrics opMetrics;
+ private final MetricsRegion metricsRegion;
/**
* Should only be used for testing purposes
@@ -386,7 +386,7 @@ public class HRegion implements HeapSize { // , Writable{
this.coprocessorHost = null;
this.scannerReadPoints = new ConcurrentHashMap<RegionScanner, Long>();
- this.opMetrics = new OperationMetrics();
+ this.metricsRegion = new MetricsRegion(new MetricsRegionWrapperImpl(this));
}
/**
@@ -449,7 +449,7 @@ public class HRegion implements HeapSize { // , Writable{
this.regiondir = getRegionDir(this.tableDir, encodedNameStr);
this.scannerReadPoints = new ConcurrentHashMap<RegionScanner, Long>();
- this.opMetrics = new OperationMetrics(conf, this.regionInfo);
+ this.metricsRegion = new MetricsRegion(new MetricsRegionWrapperImpl(this));
/*
* timestamp.slop provides a server-side constraint on the timestamp. This
@@ -1023,7 +1023,7 @@ public class HRegion implements HeapSize { // , Writable{
status.setStatus("Running coprocessor post-close hooks");
this.coprocessorHost.postClose(abort);
}
- this.opMetrics.closeMetrics();
+ this.metricsRegion.close();
status.markComplete("Closed");
LOG.info("Closed " + this);
return result;
@@ -1723,7 +1723,6 @@ public class HRegion implements HeapSize { // , Writable{
protected RegionScanner getScanner(Scan scan,
List<KeyValueScanner> additionalScanners) throws IOException {
startRegionOperation();
- this.readRequestsCount.increment();
try {
// Verify families are all valid
prepareScanner(scan);
@@ -2322,26 +2321,20 @@ public class HRegion implements HeapSize { // , Writable{
}
}
- // do after lock
- final long netTimeMs = EnvironmentEdgeManager.currentTimeMillis() - startTimeMs;
-
// See if the column families were consistent through the whole thing.
// if they were then keep them. If they were not then pass a null.
// null will be treated as unknown.
// Total time taken might be involving Puts and Deletes.
// Split the time for puts and deletes based on the total number of Puts and Deletes.
- long timeTakenForPuts = 0;
+
if (noOfPuts > 0) {
// There were some Puts in the batch.
double noOfMutations = noOfPuts + noOfDeletes;
- timeTakenForPuts = (long) (netTimeMs * (noOfPuts / noOfMutations));
- final Set<byte[]> keptCfs = putsCfSetConsistent ? putsCfSet : null;
- this.opMetrics.updateMultiPutMetrics(keptCfs, timeTakenForPuts);
+ this.metricsRegion.updatePut();
}
if (noOfDeletes > 0) {
// There were some Deletes in the batch.
- final Set<byte[]> keptCfs = deletesCfSetConsistent ? deletesCfSet : null;
- this.opMetrics.updateMultiDeleteMetrics(keptCfs, netTimeMs - timeTakenForPuts);
+ this.metricsRegion.updateDelete();
}
if (!success) {
for (int i = firstIndex; i < lastIndexExclusive; i++) {
@@ -3177,7 +3170,7 @@ public class HRegion implements HeapSize { // , Writable{
/**
* See if row is currently locked.
- * @param lockid
+ * @param lockId
* @return boolean
*/
boolean isRowLocked(final Integer lockId) {
@@ -4246,7 +4239,6 @@ public class HRegion implements HeapSize { // , Writable{
*/
private List<KeyValue> get(Get get, boolean withCoprocessor)
throws IOException {
- long now = EnvironmentEdgeManager.currentTimeMillis();
List<KeyValue> results = new ArrayList<KeyValue>();
@@ -4262,7 +4254,7 @@ public class HRegion implements HeapSize { // , Writable{
RegionScanner scanner = null;
try {
scanner = getScanner(scan);
- scanner.next(results, SchemaMetrics.METRIC_GETSIZE);
+ scanner.next(results);
} finally {
if (scanner != null)
scanner.close();
@@ -4274,8 +4266,8 @@ public class HRegion implements HeapSize { // , Writable{
}
// do after lock
- final long after = EnvironmentEdgeManager.currentTimeMillis();
- this.opMetrics.updateGetMetrics(get.familySet(), after - now);
+
+ this.metricsRegion.updateGet();
return results;
}
@@ -4348,15 +4340,11 @@ public class HRegion implements HeapSize { // , Writable{
processor.postProcess(this, walEdit);
} catch (IOException e) {
long endNanoTime = System.nanoTime();
- RegionMetricsStorage.incrTimeVaryingMetric(metricsName + ".error.nano",
- endNanoTime - startNanoTime);
throw e;
} finally {
closeRegionOperation();
}
final long endNanoTime = System.nanoTime();
- RegionMetricsStorage.incrTimeVaryingMetric(metricsName + ".nano",
- endNanoTime - startNanoTime);
return;
}
@@ -4463,8 +4451,6 @@ public class HRegion implements HeapSize { // , Writable{
} catch (IOException e) {
long endNanoTime = System.nanoTime();
- RegionMetricsStorage.incrTimeVaryingMetric(metricsName + ".error.nano",
- endNanoTime - startNanoTime);
throw e;
} finally {
closeRegionOperation();
@@ -4475,20 +4461,6 @@ public class HRegion implements HeapSize { // , Writable{
}
// Populate all metrics
long endNanoTime = System.nanoTime();
- RegionMetricsStorage.incrTimeVaryingMetric(metricsName + ".nano",
- endNanoTime - startNanoTime);
-
- RegionMetricsStorage.incrTimeVaryingMetric(metricsName + ".acquirelock.nano",
- lockedNanoTime - startNanoTime);
-
- RegionMetricsStorage.incrTimeVaryingMetric(metricsName + ".process.nano",
- processDoneNanoTime - lockedNanoTime);
-
- RegionMetricsStorage.incrTimeVaryingMetric(metricsName + ".occupylock.nano",
- unlockedNanoTime - lockedNanoTime);
-
- RegionMetricsStorage.incrTimeVaryingMetric(metricsName + ".sync.nano",
- endNanoTime - unlockedNanoTime);
}
private void doProcessRowWithTimeout(final RowProcessor<?> processor,
@@ -4565,7 +4537,7 @@ public class HRegion implements HeapSize { // , Writable{
WALEdit walEdits = null;
List<KeyValue> allKVs = new ArrayList<KeyValue>(append.size());
Map<Store, List<KeyValue>> tempMemstore = new HashMap<Store, List<KeyValue>>();
- long before = EnvironmentEdgeManager.currentTimeMillis();
+
long size = 0;
long txid = 0;
@@ -4682,8 +4654,7 @@ public class HRegion implements HeapSize { // , Writable{
closeRegionOperation();
}
- long after = EnvironmentEdgeManager.currentTimeMillis();
- this.opMetrics.updateAppendMetrics(append.getFamilyMap().keySet(), after - before);
+ this.metricsRegion.updateAppend();
if (flush) {
@@ -4718,7 +4689,7 @@ public class HRegion implements HeapSize { // , Writable{
WALEdit walEdits = null;
List<KeyValue> allKVs = new ArrayList<KeyValue>(increment.numColumns());
Map<Store, List<KeyValue>> tempMemstore = new HashMap<Store, List<KeyValue>>();
- long before = EnvironmentEdgeManager.currentTimeMillis();
+
long size = 0;
long txid = 0;
@@ -4808,8 +4779,7 @@ public class HRegion implements HeapSize { // , Writable{
}
} finally {
closeRegionOperation();
- long after = EnvironmentEdgeManager.currentTimeMillis();
- this.opMetrics.updateIncrementMetrics(increment.getFamilyMap().keySet(), after - before);
+ this.metricsRegion.updateIncrement();
}
if (flush) {
@@ -5282,7 +5252,8 @@ public class HRegion implements HeapSize { // , Writable{
* These information are exposed by the region server metrics.
*/
private void recordPutWithoutWal(final Map<byte[], List<KeyValue>> familyMap) {
- if (numPutsWithoutWAL.getAndIncrement() == 0) {
+ numPutsWithoutWAL.increment();
+ if (numPutsWithoutWAL.get() <= 1) {
LOG.info("writing data to region " + this +
" with WAL disabled. Data may be lost in the event of a crash.");
}
@@ -5294,7 +5265,7 @@ public class HRegion implements HeapSize { // , Writable{
}
}
- dataInMemoryWithoutWAL.addAndGet(putSize);
+ dataInMemoryWithoutWAL.add(putSize);
}
/**
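
The HRegion hunks above swap AtomicLong for org.cliffc.high_scale_lib.Counter on the hot write paths. Counter stripes its value across cells so concurrent increments do not contend on a single compare-and-swap, while get() sums the stripes; that is also why recordPutWithoutWal now increments first and reads afterwards instead of using getAndIncrement(). A self-contained sketch of the Counter calls used above:

    import org.cliffc.high_scale_lib.Counter;

    public class CounterSketch {
      public static void main(String[] args) {
        Counter dataInMemoryWithoutWAL = new Counter();
        dataInMemoryWithoutWAL.increment(); // hot-path bump, no single CAS hot spot
        dataInMemoryWithoutWAL.add(1024);   // bulk add, as in add(putSize) above
        // get() walks and sums all stripes, so it is the more expensive call
        System.out.println(dataInMemoryWithoutWAL.get()); // prints 1025
      }
    }
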
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index 06cbcce..38d9bb3 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -48,13 +48,11 @@ import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ConcurrentSkipListMap;
import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import javax.management.ObjectName;
import com.google.protobuf.Message;
-import org.apache.commons.lang.mutable.MutableDouble;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
@@ -67,7 +65,6 @@ import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.FailedSanityCheckException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HDFSBlocksDistribution;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
@@ -107,9 +104,7 @@ import org.apache.hadoop.hbase.executor.ExecutorService.ExecutorType;
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
import org.apache.hadoop.hbase.filter.ByteArrayComparable;
import org.apache.hadoop.hbase.fs.HFileSystem;
-import org.apache.hadoop.hbase.io.hfile.BlockCache;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
-import org.apache.hadoop.hbase.io.hfile.CacheStats;
import org.apache.hadoop.hbase.ipc.CoprocessorProtocol;
import org.apache.hadoop.hbase.ipc.HBaseRPC;
import org.apache.hadoop.hbase.ipc.HBaseRPCErrorHandler;
@@ -191,11 +186,6 @@ import org.apache.hadoop.hbase.regionserver.handler.CloseRootHandler;
import org.apache.hadoop.hbase.regionserver.handler.OpenMetaHandler;
import org.apache.hadoop.hbase.regionserver.handler.OpenRegionHandler;
import org.apache.hadoop.hbase.regionserver.handler.OpenRootHandler;
-import org.apache.hadoop.hbase.regionserver.metrics.RegionMetricsStorage;
-import org.apache.hadoop.hbase.regionserver.metrics.RegionServerDynamicMetrics;
-import org.apache.hadoop.hbase.regionserver.metrics.RegionServerMetrics;
-import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics;
-import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics.StoreMetricType;
import org.apache.hadoop.hbase.regionserver.wal.HLog;
import org.apache.hadoop.hbase.regionserver.wal.HLogUtil;
import org.apache.hadoop.hbase.regionserver.wal.HLogFactory;
@@ -225,6 +215,7 @@ import org.apache.hadoop.net.DNS;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.StringUtils;
import org.apache.zookeeper.KeeperException;
+import org.cliffc.high_scale_lib.Counter;
import org.codehaus.jackson.map.ObjectMapper;
import com.google.common.base.Function;
@@ -297,9 +288,8 @@ public class HRegionServer implements ClientProtocol,
// Instance of the hbase executor service.
protected ExecutorService service;
- // Request counter.
- // Do we need this? Can't we just sum region counters? St.Ack 20110412
- protected AtomicInteger requestCount = new AtomicInteger();
+ // Request counter. (Includes requests that are not serviced by regions.)
+ final Counter requestCount = new Counter();
// If false, the file system has become unavailable
protected volatile boolean fsOk;
@@ -366,9 +356,7 @@ public class HRegionServer implements ClientProtocol,
*/
private final LinkedList<byte[]> reservedSpace = new LinkedList<byte[]>();
- private RegionServerMetrics metrics;
-
- private RegionServerDynamicMetrics dynamicMetrics;
+ private MetricsRegionServer metricsRegionServer;
/*
* Check for compactions requests.
@@ -403,7 +391,7 @@ public class HRegionServer implements ClientProtocol,
private final RegionServerAccounting regionServerAccounting;
// Cache configuration and block cache reference
- private final CacheConfig cacheConfig;
+ final CacheConfig cacheConfig;
// reference to the Thrift Server.
volatile private HRegionThriftServer thriftServer;
@@ -446,6 +434,11 @@ public class HRegionServer implements ClientProtocol,
*/
private final QosFunction qosFunction;
+ /**
+ * Time stamp, in millis, of when this HRegionServer was started.
+ */
+ private long regionServerStartTime;
+
/**
* Starts a HRegionServer at the default location
@@ -550,6 +543,19 @@ public class HRegionServer implements ClientProtocol,
}
}
+
+ /**
+ * @return timestamp in millis when HRegionServer was started.
+ */
+ public long getRegionServerStartTime() {
+ return regionServerStartTime;
+ }
+
+ public String getClusterId() {
+ // TODO: cache in this class field?
+ return this.conf.get(HConstants.CLUSTER_ID);
+ }
+
@Retention(RetentionPolicy.RUNTIME)
protected @interface QosPriority {
int priority() default 0;
@@ -838,6 +844,8 @@ public class HRegionServer implements ClientProtocol,
* The HRegionServer sticks in this loop until closed.
*/
public void run() {
+ regionServerStartTime = System.currentTimeMillis();
+
try {
// Do pre-registration initializations; zookeeper, lease threads, etc.
preRegistrationInitialization();
@@ -858,7 +866,6 @@ public class HRegionServer implements ClientProtocol,
break;
}
}
- registerMBean();
// We registered with the Master. Go into run mode.
long lastMsg = 0;
@@ -893,7 +900,6 @@ public class HRegionServer implements ClientProtocol,
}
long now = System.currentTimeMillis();
if ((now - lastMsg) >= msgInterval) {
- doMetrics();
tryRegionServerReport(lastMsg, now);
lastMsg = System.currentTimeMillis();
}
@@ -1022,8 +1028,6 @@ public class HRegionServer implements ClientProtocol,
void tryRegionServerReport(long reportStartTime, long reportEndTime)
throws IOException {
HBaseProtos.ServerLoad sl = buildServerLoad(reportStartTime, reportEndTime);
- // Why we do this?
- this.requestCount.set(0);
try {
RegionServerReportRequest.Builder request = RegionServerReportRequest.newBuilder();
ServerName sn = ServerName.parseVersionedServerName(
@@ -1044,13 +1048,14 @@ public class HRegionServer implements ClientProtocol,
}
HBaseProtos.ServerLoad buildServerLoad(long reportStartTime, long reportEndTime) {
+ MetricsRegionServerWrapper regionServerWrapper = this.metricsRegionServer.getRegionServerWrapper();
Collection<HRegion> regions = getOnlineRegionsLocalContext();
MemoryUsage memory =
ManagementFactory.getMemoryMXBean().getHeapMemoryUsage();
HBaseProtos.ServerLoad.Builder serverLoad = HBaseProtos.ServerLoad.newBuilder();
- serverLoad.setNumberOfRequests((int)metrics.getRequests());
- serverLoad.setTotalNumberOfRequests(requestCount.get());
+ serverLoad.setNumberOfRequests((int) regionServerWrapper.getRequestsPerSecond());
+ serverLoad.setTotalNumberOfRequests((int)requestCount.get());
serverLoad.setUsedHeapMB((int)(memory.getUsed() / 1024 / 1024));
serverLoad.setMaxHeapMB((int) (memory.getMax() / 1024 / 1024));
Set<String> coprocessors = this.hlog.getCoprocessorHost().getCoprocessors();
@@ -1205,8 +1210,7 @@ public class HRegionServer implements ClientProtocol,
this.tableDescriptors = new FSTableDescriptors(this.fs, this.rootDir, true);
this.hlog = setupWALAndReplication();
// Init in here rather than in constructor after thread name has been set
- this.metrics = new RegionServerMetrics();
- this.dynamicMetrics = RegionServerDynamicMetrics.newInstance();
+ this.metricsRegionServer = new MetricsRegionServer(new MetricsRegionServerWrapperImpl(this));
startServiceThreads();
LOG.info("Serving as " + this.serverNameFromMasterPOV +
", RPC listening on " + this.isa +
@@ -1441,179 +1445,8 @@ public class HRegionServer implements ClientProtocol,
return hlogRoller;
}
- /*
- * @param interval Interval since last time metrics were called.
- */
- protected void doMetrics() {
- try {
- metrics();
- } catch (Throwable e) {
- LOG.warn("Failed metrics", e);
- }
- }
-
- protected void metrics() {
- this.metrics.regions.set(this.onlineRegions.size());
- this.metrics.incrementRequests(this.requestCount.get());
- this.metrics.requests.intervalHeartBeat();
- // Is this too expensive every three seconds getting a lock on onlineRegions
- // and then per store carried? Can I make metrics be sloppier and avoid
- // the synchronizations?
- int stores = 0;
- int storefiles = 0;
- long memstoreSize = 0;
- int readRequestsCount = 0;
- int writeRequestsCount = 0;
- long checkAndMutateChecksFailed = 0;
- long checkAndMutateChecksPassed = 0;
- long storefileIndexSize = 0;
- HDFSBlocksDistribution hdfsBlocksDistribution =
- new HDFSBlocksDistribution();
- long totalStaticIndexSize = 0;
- long totalStaticBloomSize = 0;
- long numPutsWithoutWAL = 0;
- long dataInMemoryWithoutWAL = 0;
- long updatesBlockedMs = 0;
-
- // Note that this is a map of Doubles instead of Longs. This is because we
- // do effective integer division, which would perhaps truncate more than it
- // should because we do it only on one part of our sum at a time. Rather
- // than dividing at the end, where it is difficult to know the proper
- // factor, everything is exact then truncated.
- final Map<String, MutableDouble> tempVals =
- new HashMap<String, MutableDouble>();
-
- for (Map.Entry<String, HRegion> e : this.onlineRegions.entrySet()) {
- HRegion r = e.getValue();
- memstoreSize += r.memstoreSize.get();
- numPutsWithoutWAL += r.numPutsWithoutWAL.get();
- dataInMemoryWithoutWAL += r.dataInMemoryWithoutWAL.get();
- readRequestsCount += r.readRequestsCount.get();
- writeRequestsCount += r.writeRequestsCount.get();
- checkAndMutateChecksFailed += r.checkAndMutateChecksFailed.get();
- checkAndMutateChecksPassed += r.checkAndMutateChecksPassed.get();
- updatesBlockedMs += r.updatesBlockedMs.get();
- synchronized (r.stores) {
- stores += r.stores.size();
- for (Map.Entry<byte[], Store> ee : r.stores.entrySet()) {
- final Store store = ee.getValue();
- final SchemaMetrics schemaMetrics = store.getSchemaMetrics();
-
- {
- long tmpStorefiles = store.getStorefilesCount();
- schemaMetrics.accumulateStoreMetric(tempVals,
- StoreMetricType.STORE_FILE_COUNT, tmpStorefiles);
- storefiles += tmpStorefiles;
- }
-
-
- {
- long tmpStorefileIndexSize = store.getStorefilesIndexSize();
- schemaMetrics.accumulateStoreMetric(tempVals,
- StoreMetricType.STORE_FILE_INDEX_SIZE,
- (long) (tmpStorefileIndexSize / (1024.0 * 1024)));
- storefileIndexSize += tmpStorefileIndexSize;
- }
-
- {
- long tmpStorefilesSize = store.getStorefilesSize();
- schemaMetrics.accumulateStoreMetric(tempVals,
- StoreMetricType.STORE_FILE_SIZE_MB,
- (long) (tmpStorefilesSize / (1024.0 * 1024)));
- }
-
- {
- long tmpStaticBloomSize = store.getTotalStaticBloomSize();
- schemaMetrics.accumulateStoreMetric(tempVals,
- StoreMetricType.STATIC_BLOOM_SIZE_KB,
- (long) (tmpStaticBloomSize / 1024.0));
- totalStaticBloomSize += tmpStaticBloomSize;
- }
-
- {
- long tmpStaticIndexSize = store.getTotalStaticIndexSize();
- schemaMetrics.accumulateStoreMetric(tempVals,
- StoreMetricType.STATIC_INDEX_SIZE_KB,
- (long) (tmpStaticIndexSize / 1024.0));
- totalStaticIndexSize += tmpStaticIndexSize;
- }
-
- schemaMetrics.accumulateStoreMetric(tempVals,
- StoreMetricType.MEMSTORE_SIZE_MB,
- (long) (store.getMemStoreSize() / (1024.0 * 1024)));
- }
- }
-
- hdfsBlocksDistribution.add(r.getHDFSBlocksDistribution());
- }
-
- for (Entry<String, MutableDouble> e : tempVals.entrySet()) {
- RegionMetricsStorage.setNumericMetric(e.getKey(), e.getValue().longValue());
- }
-
- this.metrics.stores.set(stores);
- this.metrics.storefiles.set(storefiles);
- this.metrics.memstoreSizeMB.set((int) (memstoreSize / (1024 * 1024)));
- this.metrics.mbInMemoryWithoutWAL.set((int) (dataInMemoryWithoutWAL / (1024 * 1024)));
- this.metrics.numPutsWithoutWAL.set(numPutsWithoutWAL);
- this.metrics.storefileIndexSizeMB.set(
- (int) (storefileIndexSize / (1024 * 1024)));
- this.metrics.rootIndexSizeKB.set(
- (int) (storefileIndexSize / 1024));
- this.metrics.totalStaticIndexSizeKB.set(
- (int) (totalStaticIndexSize / 1024));
- this.metrics.totalStaticBloomSizeKB.set(
- (int) (totalStaticBloomSize / 1024));
- this.metrics.readRequestsCount.set(readRequestsCount);
- this.metrics.writeRequestsCount.set(writeRequestsCount);
- this.metrics.checkAndMutateChecksFailed.set(checkAndMutateChecksFailed);
- this.metrics.checkAndMutateChecksPassed.set(checkAndMutateChecksPassed);
- this.metrics.compactionQueueSize.set(compactSplitThread
- .getCompactionQueueSize());
- this.metrics.flushQueueSize.set(cacheFlusher
- .getFlushQueueSize());
- this.metrics.updatesBlockedSeconds.update(updatesBlockedMs > 0 ?
- updatesBlockedMs/1000: 0);
- final long updatesBlockedMsHigherWater = cacheFlusher.getUpdatesBlockedMsHighWater().get();
- this.metrics.updatesBlockedSecondsHighWater.update(updatesBlockedMsHigherWater > 0 ?
- updatesBlockedMsHigherWater/1000: 0);
-
- BlockCache blockCache = cacheConfig.getBlockCache();
- if (blockCache != null) {
- this.metrics.blockCacheCount.set(blockCache.size());
- this.metrics.blockCacheFree.set(blockCache.getFreeSize());
- this.metrics.blockCacheSize.set(blockCache.getCurrentSize());
- CacheStats cacheStats = blockCache.getStats();
- this.metrics.blockCacheHitCount.set(cacheStats.getHitCount());
- this.metrics.blockCacheMissCount.set(cacheStats.getMissCount());
- this.metrics.blockCacheEvictedCount.set(blockCache.getEvictedCount());
- double ratio = blockCache.getStats().getHitRatio();
- int percent = (int) (ratio * 100);
- this.metrics.blockCacheHitRatio.set(percent);
- ratio = blockCache.getStats().getHitCachingRatio();
- percent = (int) (ratio * 100);
- this.metrics.blockCacheHitCachingRatio.set(percent);
- // past N period block cache hit / hit caching ratios
- cacheStats.rollMetricsPeriod();
- ratio = cacheStats.getHitRatioPastNPeriods();
- percent = (int) (ratio * 100);
- this.metrics.blockCacheHitRatioPastNPeriods.set(percent);
- ratio = cacheStats.getHitCachingRatioPastNPeriods();
- percent = (int) (ratio * 100);
- this.metrics.blockCacheHitCachingRatioPastNPeriods.set(percent);
- }
- float localityIndex = hdfsBlocksDistribution.getBlockLocalityIndex(
- getServerName().getHostname());
- int percent = (int) (localityIndex * 100);
- this.metrics.hdfsBlocksLocalityIndex.set(percent);
-
- }
-
- /**
- * @return Region server metrics instance.
- */
- public RegionServerMetrics getMetrics() {
- return this.metrics;
+ public MetricsRegionServer getMetrics() {
+ return this.metricsRegionServer;
}
/**
@@ -1841,9 +1674,6 @@ public class HRegionServer implements ClientProtocol,
// java.util.HashSet's toString() method to print the coprocessor names.
LOG.fatal("RegionServer abort: loaded coprocessors are: " +
CoprocessorHost.getLoadedCoprocessors());
- if (this.metrics != null) {
- LOG.info("Dump of metrics: " + this.metrics);
- }
// Do our best to report our abort to the master, but this may not work
try {
if (cause != null) {
@@ -2184,7 +2014,7 @@ public class HRegionServer implements ClientProtocol,
}
/**
- * For tests and web ui.
+ * For tests, web ui and metrics.
* This method will only work if HRegionServer is in the same JVM as client;
* HRegion cannot be serialized to cross an rpc.
* @see #getOnlineRegions()
@@ -2218,11 +2048,6 @@ public class HRegionServer implements ClientProtocol,
return sortedRegions;
}
- /** @return the request count */
- public AtomicInteger getRequestCount() {
- return this.requestCount;
- }
-
/**
* @return time stamp in millis of when this region server was started
*/
@@ -2498,16 +2323,6 @@ public class HRegionServer implements ClientProtocol,
}
/**
- * Register bean with platform management server
- */
- void registerMBean() {
- MXBeanImpl mxBeanInfo = MXBeanImpl.init(this);
- mxBean = MBeanUtil.registerMBean("RegionServer", "RegionServer",
- mxBeanInfo);
- LOG.info("Registered RegionServer MXBean");
- }
-
- /**
* Instantiated as a row lock lease. If the lease times out, the row lock is
* released
*/
@@ -2685,14 +2500,7 @@ public class HRegionServer implements ClientProtocol,
if (destination != null){
addToMovedRegions(encodedRegionName, destination);
}
-
- //Clear all of the dynamic metrics as they are now probably useless.
- //This is a clear because dynamic metrics could include metrics per cf and
- //per hfile. Figuring out which cfs, hfiles, and regions are still relevant to
- //this region server would be an onerous task. Instead just clear everything
- //and on the next tick of the metrics everything that is still relevant will be
- //re-added.
- this.dynamicMetrics.clear();
+
return toReturn != null;
}
@@ -2885,8 +2693,9 @@ public class HRegionServer implements ClientProtocol,
@Override
public GetResponse get(final RpcController controller,
final GetRequest request) throws ServiceException {
+ long before = EnvironmentEdgeManager.currentTimeMillis();
try {
- requestCount.incrementAndGet();
+ requestCount.increment();
HRegion region = getRegion(request.getRegion());
GetResponse.Builder builder = GetResponse.newBuilder();
ClientProtos.Get get = request.getGet();
@@ -2926,6 +2735,8 @@ public class HRegionServer implements ClientProtocol,
return builder.build();
} catch (IOException ie) {
throw new ServiceException(ie);
+ } finally {
+ metricsRegionServer.updateGet(EnvironmentEdgeManager.currentTimeMillis() - before);
}
}
@@ -2940,7 +2751,7 @@ public class HRegionServer implements ClientProtocol,
public MutateResponse mutate(final RpcController controller,
final MutateRequest request) throws ServiceException {
try {
- requestCount.incrementAndGet();
+ requestCount.increment();
HRegion region = getRegion(request.getRegion());
MutateResponse.Builder builder = MutateResponse.newBuilder();
Mutate mutate = request.getMutate();
@@ -3073,7 +2884,7 @@ public class HRegionServer implements ClientProtocol,
}
throw e;
}
- requestCount.incrementAndGet();
+ requestCount.increment();
try {
int ttl = 0;
@@ -3167,7 +2978,7 @@ public class HRegionServer implements ClientProtocol,
for (int i = 0; i < rows
&& currentScanResultSize < maxResultSize; i++) {
// Collect values to be returned here
- boolean moreRows = scanner.next(values, SchemaMetrics.METRIC_NEXTSIZE);
+ boolean moreRows = scanner.next(values);
if (!values.isEmpty()) {
for (KeyValue kv : values) {
currentScanResultSize += kv.heapSize();
@@ -3261,7 +3072,7 @@ public class HRegionServer implements ClientProtocol,
throw new DoNotRetryIOException(
"lockRow supports only one row now, not " + request.getRowCount() + " rows");
}
- requestCount.incrementAndGet();
+ requestCount.increment();
HRegion region = getRegion(request.getRegion());
byte[] row = request.getRow(0).toByteArray();
try {
@@ -3292,7 +3103,7 @@ public class HRegionServer implements ClientProtocol,
public UnlockRowResponse unlockRow(final RpcController controller,
final UnlockRowRequest request) throws ServiceException {
try {
- requestCount.incrementAndGet();
+ requestCount.increment();
HRegion region = getRegion(request.getRegion());
if (!request.hasLockId()) {
throw new DoNotRetryIOException(
@@ -3327,7 +3138,7 @@ public class HRegionServer implements ClientProtocol,
public BulkLoadHFileResponse bulkLoadHFile(final RpcController controller,
final BulkLoadHFileRequest request) throws ServiceException {
try {
- requestCount.incrementAndGet();
+ requestCount.increment();
HRegion region = getRegion(request.getRegion());
List<Pair<byte[], String>> familyPaths = new ArrayList<Pair<byte[], String>>();
for (FamilyPath familyPath: request.getFamilyPathList()) {
@@ -3374,7 +3185,7 @@ public class HRegionServer implements ClientProtocol,
public ExecCoprocessorResponse execCoprocessor(final RpcController controller,
final ExecCoprocessorRequest request) throws ServiceException {
try {
- requestCount.incrementAndGet();
+ requestCount.increment();
HRegion region = getRegion(request.getRegion());
ExecCoprocessorResponse.Builder
builder = ExecCoprocessorResponse.newBuilder();
@@ -3392,7 +3203,7 @@ public class HRegionServer implements ClientProtocol,
public CoprocessorServiceResponse execService(final RpcController controller,
final CoprocessorServiceRequest request) throws ServiceException {
try {
- requestCount.incrementAndGet();
+ requestCount.increment();
HRegion region = getRegion(request.getRegion());
// ignore the passed in controller (from the serialized call)
ServerRpcController execController = new ServerRpcController();
@@ -3441,7 +3252,7 @@ public class HRegionServer implements ClientProtocol,
ActionResult.Builder resultBuilder = null;
List<Mutate> mutates = new ArrayList<Mutate>();
for (ClientProtos.MultiAction actionUnion : request.getActionList()) {
- requestCount.incrementAndGet();
+ requestCount.increment();
try {
Object result = null;
if (actionUnion.hasGet()) {
@@ -3524,7 +3335,7 @@ public class HRegionServer implements ClientProtocol,
final GetRegionInfoRequest request) throws ServiceException {
try {
checkOpen();
- requestCount.incrementAndGet();
+ requestCount.increment();
HRegion region = getRegion(request.getRegion());
HRegionInfo info = region.getRegionInfo();
GetRegionInfoResponse.Builder builder = GetRegionInfoResponse.newBuilder();
@@ -3544,7 +3355,7 @@ public class HRegionServer implements ClientProtocol,
final GetStoreFileRequest request) throws ServiceException {
try {
HRegion region = getRegion(request.getRegion());
- requestCount.incrementAndGet();
+ requestCount.increment();
Set<byte[]> columnFamilies = null;
if (request.getFamilyCount() == 0) {
columnFamilies = region.getStores().keySet();
@@ -3571,7 +3382,7 @@ public class HRegionServer implements ClientProtocol,
final GetOnlineRegionRequest request) throws ServiceException {
try {
checkOpen();
- requestCount.incrementAndGet();
+ requestCount.increment();
List<HRegionInfo> list = new ArrayList<HRegionInfo>(onlineRegions.size());
for (HRegion region: this.onlineRegions.values()) {
list.add(region.getRegionInfo());
@@ -3602,7 +3413,7 @@ public class HRegionServer implements ClientProtocol,
} catch (IOException ie) {
throw new ServiceException(ie);
}
- requestCount.incrementAndGet();
+ requestCount.increment();
OpenRegionResponse.Builder builder = OpenRegionResponse.newBuilder();
int regionCount = request.getOpenInfoCount();
Map<String, HTableDescriptor> htds =
@@ -3694,7 +3505,6 @@ public class HRegionServer implements ClientProtocol,
try {
checkOpen();
- requestCount.incrementAndGet();
String encodedRegionName =
ProtobufUtil.getRegionEncodedName(request.getRegion());
byte[] encodedName = Bytes.toBytes(encodedRegionName);
@@ -3706,6 +3516,7 @@ public class HRegionServer implements ClientProtocol,
checkIfRegionInTransition(encodedName, CLOSE);
}
HRegion region = getRegionByEncodedName(encodedRegionName);
+ requestCount.increment();
LOG.info("Received close region: " + region.getRegionNameAsString() +
". Version of ZK closing node:" + versionOfClosingNode +
". Destination server:" + sn);
@@ -3734,7 +3545,7 @@ public class HRegionServer implements ClientProtocol,
final FlushRegionRequest request) throws ServiceException {
try {
checkOpen();
- requestCount.incrementAndGet();
+ requestCount.increment();
HRegion region = getRegion(request.getRegion());
LOG.info("Flushing " + region.getRegionNameAsString());
boolean shouldFlush = true;
@@ -3765,7 +3576,7 @@ public class HRegionServer implements ClientProtocol,
final SplitRegionRequest request) throws ServiceException {
try {
checkOpen();
- requestCount.incrementAndGet();
+ requestCount.increment();
HRegion region = getRegion(request.getRegion());
LOG.info("Splitting " + region.getRegionNameAsString());
region.flushcache();
@@ -3794,7 +3605,7 @@ public class HRegionServer implements ClientProtocol,
final CompactRegionRequest request) throws ServiceException {
try {
checkOpen();
- requestCount.incrementAndGet();
+ requestCount.increment();
HRegion region = getRegion(request.getRegion());
LOG.info("Compacting " + region.getRegionNameAsString());
boolean major = false;
@@ -3829,7 +3640,7 @@ public class HRegionServer implements ClientProtocol,
try {
if (replicationSinkHandler != null) {
checkOpen();
- requestCount.incrementAndGet();
+ requestCount.increment();
HLog.Entry[] entries = ProtobufUtil.toHLogEntries(request.getEntryList());
if (entries != null && entries.length > 0) {
replicationSinkHandler.replicateLogEntries(entries);
@@ -3852,7 +3663,7 @@ public class HRegionServer implements ClientProtocol,
public RollWALWriterResponse rollWALWriter(final RpcController controller,
final RollWALWriterRequest request) throws ServiceException {
try {
- requestCount.incrementAndGet();
+ requestCount.increment();
HLog wal = this.getWAL();
byte[][] regionsToFlush = wal.rollWriter(true);
RollWALWriterResponse.Builder builder = RollWALWriterResponse.newBuilder();
@@ -3877,7 +3688,7 @@ public class HRegionServer implements ClientProtocol,
@Override
public StopServerResponse stopServer(final RpcController controller,
final StopServerRequest request) throws ServiceException {
- requestCount.incrementAndGet();
+ requestCount.increment();
String reason = request.getReason();
stop(reason);
return StopServerResponse.newBuilder().build();
@@ -3894,7 +3705,7 @@ public class HRegionServer implements ClientProtocol,
public GetServerInfoResponse getServerInfo(final RpcController controller,
final GetServerInfoRequest request) throws ServiceException {
ServerName serverName = getServerName();
- requestCount.incrementAndGet();
+ requestCount.increment();
return ResponseConverter.buildGetServerInfoResponse(serverName, webuiport);
}
@@ -3924,6 +3735,7 @@ public class HRegionServer implements ClientProtocol,
*/
protected Result append(final HRegion region,
final Mutate mutate) throws IOException {
+ long before = EnvironmentEdgeManager.currentTimeMillis();
Append append = ProtobufUtil.toAppend(mutate);
Result r = null;
if (region.getCoprocessorHost() != null) {
@@ -3936,6 +3748,7 @@ public class HRegionServer implements ClientProtocol,
region.getCoprocessorHost().postAppend(append, r);
}
}
+ metricsRegionServer.updateAppend(EnvironmentEdgeManager.currentTimeMillis() - before);
return r;
}
@@ -3949,6 +3762,7 @@ public class HRegionServer implements ClientProtocol,
*/
protected Result increment(final HRegion region,
final Mutate mutate) throws IOException {
+ long before = EnvironmentEdgeManager.currentTimeMillis();
Increment increment = ProtobufUtil.toIncrement(mutate);
Result r = null;
if (region.getCoprocessorHost() != null) {
@@ -3961,6 +3775,7 @@ public class HRegionServer implements ClientProtocol,
r = region.getCoprocessorHost().postIncrement(increment, r);
}
}
+ metricsRegionServer.updateIncrement(EnvironmentEdgeManager.currentTimeMillis() - before);
return r;
}
@@ -3975,7 +3790,8 @@ public class HRegionServer implements ClientProtocol,
final HRegion region, final List<Mutate> mutates) {
@SuppressWarnings("unchecked")
Pair<Mutation, Integer>[] mutationsWithLocks = new Pair[mutates.size()];
-
+ long before = EnvironmentEdgeManager.currentTimeMillis();
+ boolean batchContainsPuts = false, batchContainsDelete = false;
try {
ActionResult.Builder resultBuilder = ActionResult.newBuilder();
NameBytesPair value = ProtobufUtil.toParameter(new Result());
@@ -3987,15 +3803,18 @@ public class HRegionServer implements ClientProtocol,
Mutation mutation = null;
if (m.getMutateType() == MutateType.PUT) {
mutation = ProtobufUtil.toPut(m);
+ batchContainsPuts = true;
} else {
mutation = ProtobufUtil.toDelete(m);
+ batchContainsDelete = true;
}
Integer lock = getLockFromId(mutation.getLockId());
mutationsWithLocks[i++] = new Pair<Mutation, Integer>(mutation, lock);
builder.addResult(result);
}
- requestCount.addAndGet(mutates.size());
+
+ requestCount.add(mutates.size());
if (!region.getRegionInfo().isMetaTable()) {
cacheFlusher.reclaimMemStoreMemory();
}
@@ -4031,6 +3850,13 @@ public class HRegionServer implements ClientProtocol,
builder.setResult(i, result);
}
}
+ long after = EnvironmentEdgeManager.currentTimeMillis();
+ if (batchContainsPuts) {
+ metricsRegionServer.updatePut(after - before);
+ }
+ if (batchContainsDelete) {
+ metricsRegionServer.updateDelete(after - before);
+ }
}
/**
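
Operation latency is now measured at the RPC layer: take a timestamp before the region work and hand the elapsed millis to the MetricsRegionServer facade in a finally block, so failed requests are timed too. For mixed batches the same wall-clock span is recorded under both put and delete, a deliberate approximation. The pattern in isolation (a sketch; updateGet and EnvironmentEdgeManager are from this patch, the surrounding method is not):

    long before = EnvironmentEdgeManager.currentTimeMillis();
    try {
      // ... service the request against the region ...
    } finally {
      // record elapsed millis even on failure, mirroring the get() path above
      metricsRegionServer.updateGet(EnvironmentEdgeManager.currentTimeMillis() - before);
    }
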
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
index 32d83f9..85b693d 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
@@ -66,8 +66,6 @@ import org.apache.hadoop.hbase.monitoring.MonitoredTask;
import org.apache.hadoop.hbase.regionserver.compactions.CompactSelection;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionProgress;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
-import org.apache.hadoop.hbase.regionserver.metrics.SchemaConfigured;
-import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.ChecksumType;
import org.apache.hadoop.hbase.util.ClassSize;
@@ -106,7 +104,7 @@ import com.google.common.collect.Lists;
* not be called directly but by an HRegion manager.
*/
@InterfaceAudience.Private
-public class HStore extends SchemaConfigured implements Store {
+public class HStore implements Store {
static final Log LOG = LogFactory.getLog(HStore.class);
protected final MemStore memstore;
@@ -174,9 +172,7 @@ public class HStore extends SchemaConfigured implements Store {
protected HStore(Path basedir, HRegion region, HColumnDescriptor family,
FileSystem fs, Configuration confParam)
throws IOException {
- super(new CompoundConfiguration().add(confParam).add(
- family.getValues()), region.getRegionInfo().getTableNameAsString(),
- Bytes.toString(family.getName()));
+
HRegionInfo info = region.getRegionInfo();
this.fs = fs;
// Assemble the store's home directory.
@@ -260,6 +256,15 @@ public class HStore extends SchemaConfigured implements Store {
return ttl;
}
+ public String getColumnFamilyName() {
+ return this.family.getNameAsString();
+ }
+
+ @Override
+ public String getTableName() {
+ return this.region.getTableDesc().getNameAsString();
+ }
+
/**
* Create this store's homedir
* @param fs
@@ -414,7 +419,6 @@ public class HStore extends SchemaConfigured implements Store {
public StoreFile call() throws IOException {
StoreFile storeFile = new StoreFile(fs, p, conf, cacheConf,
family.getBloomFilterType(), dataBlockEncoder);
- passSchemaMetricsTo(storeFile);
storeFile.createReader();
return storeFile;
}
@@ -573,7 +577,6 @@ public class HStore extends SchemaConfigured implements Store {
StoreFile sf = new StoreFile(fs, dstPath, this.conf, this.cacheConf,
this.family.getBloomFilterType(), this.dataBlockEncoder);
- passSchemaMetricsTo(sf);
StoreFile.Reader r = sf.createReader();
this.storeSize += r.length();
@@ -817,19 +820,11 @@ public class HStore extends SchemaConfigured implements Store {
status.setStatus("Flushing " + this + ": reopening flushed file");
StoreFile sf = new StoreFile(this.fs, dstPath, this.conf, this.cacheConf,
this.family.getBloomFilterType(), this.dataBlockEncoder);
- passSchemaMetricsTo(sf);
StoreFile.Reader r = sf.createReader();
this.storeSize += r.length();
this.totalUncompressedBytes += r.getTotalUncompressedBytes();
- // This increments the metrics associated with total flushed bytes for this
- // family. The overall flush count is stored in the static metrics and
- // retrieved from HRegion.recentFlushes, which is set within
- // HRegion.internalFlushcache, which indirectly calls this to actually do
- // the flushing through the StoreFlusherImpl class
- getSchemaMetrics().updatePersistentStoreMetric(
- SchemaMetrics.StoreMetricType.FLUSH_SIZE, flushedSize.longValue());
if (LOG.isInfoEnabled()) {
LOG.info("Added " + sf + ", entries=" + r.getEntries() +
", sequenceid=" + logCacheFlushId +
@@ -875,11 +870,6 @@ public class HStore extends SchemaConfigured implements Store {
.withBytesPerChecksum(bytesPerChecksum)
.withCompression(compression)
.build();
- // The store file writer's path does not include the CF name, so we need
- // to configure the HFile writer directly.
- SchemaConfigured sc = (SchemaConfigured) w.writer;
- SchemaConfigured.resetSchemaMetricsConf(sc);
- passSchemaMetricsTo(sc);
return w;
}
@@ -1409,8 +1399,8 @@ public class HStore extends SchemaConfigured implements Store {
(forcemajor || isMajorCompaction(compactSelection.getFilesToCompact())) &&
(compactSelection.getFilesToCompact().size() < this.maxFilesToCompact
);
- LOG.debug(this.getHRegionInfo().getEncodedName() + " - " +
- this.getColumnFamilyName() + ": Initiating " +
+ LOG.debug(this.getHRegionInfo().getEncodedName() + " - "
+ + this.getColumnFamilyName() + ": Initiating " +
(majorcompaction ? "major" : "minor") + "compaction");
if (!majorcompaction &&
@@ -1523,7 +1513,6 @@ public class HStore extends SchemaConfigured implements Store {
storeFile = new StoreFile(this.fs, path, this.conf,
this.cacheConf, this.family.getBloomFilterType(),
NoOpDataBlockEncoder.INSTANCE);
- passSchemaMetricsTo(storeFile);
storeFile.createReader();
} catch (IOException e) {
LOG.error("Failed to open store file : " + path
@@ -1575,7 +1564,6 @@ public class HStore extends SchemaConfigured implements Store {
}
result = new StoreFile(this.fs, destPath, this.conf, this.cacheConf,
this.family.getBloomFilterType(), this.dataBlockEncoder);
- passSchemaMetricsTo(result);
result.createReader();
}
try {
@@ -1936,7 +1924,7 @@ public class HStore extends SchemaConfigured implements Store {
@Override
public String toString() {
- return getColumnFamilyName();
+ return this.getColumnFamilyName();
}
@Override
@@ -2125,9 +2113,8 @@ public class HStore extends SchemaConfigured implements Store {
}
public static final long FIXED_OVERHEAD =
- ClassSize.align(SchemaConfigured.SCHEMA_CONFIGURED_UNALIGNED_HEAP_SIZE +
- + (17 * ClassSize.REFERENCE) + (6 * Bytes.SIZEOF_LONG)
- + (5 * Bytes.SIZEOF_INT) + Bytes.SIZEOF_BOOLEAN);
+ ClassSize.align((19 * ClassSize.REFERENCE) + (6 * Bytes.SIZEOF_LONG)
+ + (5 * Bytes.SIZEOF_INT) + Bytes.SIZEOF_BOOLEAN);
public static final long DEEP_OVERHEAD = ClassSize.align(FIXED_OVERHEAD
+ ClassSize.OBJECT + ClassSize.REENTRANT_LOCK
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MXBean.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MXBean.java
deleted file mode 100644
index b0a92c5..0000000
--- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MXBean.java
+++ /dev/null
@@ -1,46 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.regionserver;
-
-import org.apache.hadoop.classification.InterfaceStability.Evolving;
-
-/**
- * This is the JMX management interface for HBase Region Server information
- */
-@Evolving
-public interface MXBean {
-
- /**
- * Return RegionServer's ServerName
- * @return ServerName
- */
- public String getServerName();
-
- /**
- * Get loaded co-processors
- * @return Loaded Co-processors
- */
- public String[] getCoprocessors();
-
- /**
- * Get Zookeeper Quorum
- * @return Comma-separated list of Zookeeper Quorum servers
- */
- public String getZookeeperQuorum();
-}
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MXBeanImpl.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MXBeanImpl.java
deleted file mode 100644
index 78f3b6f..0000000
--- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MXBeanImpl.java
+++ /dev/null
@@ -1,54 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.regionserver;
-
-/**
- * Impl for exposing Region Server Information through JMX
- */
-public class MXBeanImpl implements MXBean {
-
- private final HRegionServer regionServer;
-
- private static MXBeanImpl instance = null;
- public synchronized static MXBeanImpl init(final HRegionServer rs){
- if (instance == null) {
- instance = new MXBeanImpl(rs);
- }
- return instance;
- }
-
- protected MXBeanImpl(final HRegionServer rs) {
- this.regionServer = rs;
- }
-
- @Override
- public String[] getCoprocessors() {
- return regionServer.getCoprocessors();
- }
-
- @Override
- public String getZookeeperQuorum() {
- return regionServer.getZooKeeper().getQuorum();
- }
-
- @Override
- public String getServerName() {
- return regionServer.getServerName().getServerName();
- }
-
-}
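
The hand-rolled MXBean/MXBeanImpl pair deleted above (together with the registerMBean() call removed from HRegionServer) is superseded by the metrics2 sources, which register themselves under the JMX contexts declared in the new *Source interfaces. A sketch of inspecting the replacement bean; the ObjectName below is an assumption based on the METRICS_JMX_CONTEXT-style constants in this patch, not a name this hunk defines:

    import java.lang.management.ManagementFactory;
    import javax.management.MBeanAttributeInfo;
    import javax.management.MBeanServer;
    import javax.management.ObjectName;

    public class DumpRegionServerMetrics {
      public static void main(String[] args) throws Exception {
        MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
        // Assumed name; adjust to whatever the source actually registers.
        ObjectName name = new ObjectName("Hadoop:service=HBase,name=RegionServer,sub=Server");
        for (MBeanAttributeInfo attr : mbs.getMBeanInfo(name).getAttributes()) {
          System.out.println(attr.getName() + " = " + mbs.getAttribute(name, attr.getName()));
        }
      }
    }
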
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
index 9300fc7..2fcf4cd 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
@@ -416,7 +416,6 @@ class MemStoreFlusher extends HasThread implements FlushRequester {
server.compactSplitThread.requestCompaction(region, getName());
}
- server.getMetrics().addFlush(region.getRecentFlushInfo());
} catch (DroppedSnapshotException ex) {
// Cache flush can fail in a few places. If it fails in a critical
// section, we get a DroppedSnapshotException and a replay of hlog
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegion.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegion.java
new file mode 100644
index 0000000..5dba749
--- /dev/null
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegion.java
@@ -0,0 +1,61 @@
+/*
+ * Copyright The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
+
+@InterfaceAudience.Private
+public class MetricsRegion {
+
+ private MetricsRegionSource metricsRegionSource;
+
+ public MetricsRegion(MetricsRegionWrapper wrapper) {
+ metricsRegionSource = CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class)
+ .createRegion(wrapper);
+ }
+
+ public void close() {
+ metricsRegionSource.close();
+ }
+
+ public void updatePut() {
+ metricsRegionSource.updatePut();
+ }
+
+ public void updateDelete() {
+ metricsRegionSource.updateDelete();
+ }
+
+ public void updateGet() {
+ metricsRegionSource.updateGet();
+ }
+
+ public void updateAppend() {
+ metricsRegionSource.updateAppend();
+ }
+
+ public void updateIncrement() {
+ metricsRegionSource.updateIncrement();
+ }
+
+ MetricsRegionSource getSource() {
+ return metricsRegionSource;
+ }
+}
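
Each HRegion owns one MetricsRegion facade for its lifetime: constructed with a wrapper over the region, bumped once per operation (counts only; latency is tracked at the region-server level), and closed when the region closes so its sub-source is deregistered. A unit-test-style sketch; MetricsRegionWrapperStub is a hypothetical test double implementing MetricsRegionWrapper:

    MetricsRegion metrics = new MetricsRegion(new MetricsRegionWrapperStub());
    metrics.updatePut(); // counted against this region's sub-source
    metrics.updateGet();
    metrics.close();     // deregister on region close, as HRegion does in its close path
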
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServer.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServer.java
new file mode 100644
index 0000000..5ae82a8
--- /dev/null
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServer.java
@@ -0,0 +1,75 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
+
+/**
+ * This class is for maintaining the various regionserver statistics
+ * and publishing them through the metrics interfaces.
+ *
+ * This class has a number of metrics variables that are publicly accessible;
+ * these variables (objects) have methods to update their values.
+ */
+@InterfaceStability.Evolving
+@InterfaceAudience.Private
+public class MetricsRegionServer {
+ private final Log LOG = LogFactory.getLog(this.getClass());
+ private MetricsRegionServerSource generalSource;
+ private MetricsRegionServerWrapper regionServerWrapper;
+
+ public MetricsRegionServer(MetricsRegionServerWrapper regionServerWrapper) {
+ this.regionServerWrapper = regionServerWrapper;
+ generalSource =
+ CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class)
+ .createGeneral(regionServerWrapper);
+ }
+
+ // for unit-test usage
+ public MetricsRegionServerSource getMetricsSource() {
+ return generalSource;
+ }
+
+ public MetricsRegionServerWrapper getRegionServerWrapper() {
+ return regionServerWrapper;
+ }
+
+  public void updatePut(long t) {
+    generalSource.updatePut(t);
+  }
+
+  public void updateDelete(long t) {
+    generalSource.updateDelete(t);
+  }
+
+  public void updateGet(long t) {
+    generalSource.updateGet(t);
+  }
+
+  public void updateIncrement(long t) {
+    generalSource.updateIncrement(t);
+  }
+
+  public void updateAppend(long t) {
+    generalSource.updateAppend(t);
+  }
+}
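
The update methods take an operation latency as their single argument. A sketch of the expected call pattern follows; the metrics field, the doPut() helper, and the millisecond unit are assumptions of this example, though EnvironmentEdgeManager.currentTimeMillis() is what the wrapper class below uses for its own timing.

    // Time an operation and hand the elapsed milliseconds to the metrics object.
    long start = EnvironmentEdgeManager.currentTimeMillis();
    doPut(); // the actual write, elided here
    metrics.updatePut(EnvironmentEdgeManager.currentTimeMillis() - start);
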
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java
new file mode 100644
index 0000000..0ce5708
--- /dev/null
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java
@@ -0,0 +1,340 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import org.apache.commons.lang.StringUtils;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
+import org.apache.hadoop.hbase.HDFSBlocksDistribution;
+import org.apache.hadoop.hbase.io.hfile.BlockCache;
+import org.apache.hadoop.hbase.io.hfile.CacheStats;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.hadoop.metrics2.MetricsExecutor;
+
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * Implementation for exposing HRegionServer information through JMX.
+ */
+@InterfaceAudience.Private
+class MetricsRegionServerWrapperImpl implements MetricsRegionServerWrapper {
+
+ public static final Log LOG = LogFactory.getLog(MetricsRegionServerWrapperImpl.class);
+
+ public static final int PERIOD = 15;
+
+ private final HRegionServer regionServer;
+ private final BlockCache blockCache;
+
+ private volatile long numStores = 0;
+ private volatile long numStoreFiles = 0;
+ private volatile long memstoreSize = 0;
+ private volatile long storeFileSize = 0;
+ private volatile double requestsPerSecond = 0.0;
+ private volatile long readRequestsCount = 0;
+ private volatile long writeRequestsCount = 0;
+ private volatile long checkAndMutateChecksFailed = 0;
+ private volatile long checkAndMutateChecksPassed = 0;
+ private volatile long storefileIndexSize = 0;
+ private volatile long totalStaticIndexSize = 0;
+ private volatile long totalStaticBloomSize = 0;
+ private volatile long numPutsWithoutWAL = 0;
+ private volatile long dataInMemoryWithoutWAL = 0;
+ private volatile int percentFileLocal = 0;
+
+ private CacheStats cacheStats;
+ private ScheduledExecutorService executor;
+ private Runnable runnable;
+
+ public MetricsRegionServerWrapperImpl(final HRegionServer regionServer) {
+ this.regionServer = regionServer;
+ this.blockCache = this.regionServer.cacheConfig.getBlockCache();
+ this.cacheStats = blockCache.getStats();
+ this.executor = CompatibilitySingletonFactory.getInstance(MetricsExecutor.class).getExecutor();
+ this.runnable = new RegionServerMetricsWrapperRunnable();
+ this.executor.scheduleWithFixedDelay(this.runnable, PERIOD, PERIOD, TimeUnit.SECONDS);
+ }
+
+ @Override
+ public String getClusterId() {
+ return regionServer.getClusterId();
+ }
+
+ @Override
+ public long getRegionServerStartTime() {
+ return regionServer.getRegionServerStartTime();
+ }
+
+ @Override
+ public String getZookeeperQuorum() {
+ return regionServer.getZooKeeperWatcher().getQuorum();
+ }
+
+ @Override
+ public String getCoprocessors() {
+ return StringUtils.join(regionServer.getCoprocessors(), ", ");
+ }
+
+ @Override
+ public String getServerName() {
+ return regionServer.getServerName().getServerName();
+ }
+
+ @Override
+ public long getNumOnlineRegions() {
+ return regionServer.getOnlineRegionsLocalContext().size();
+ }
+
+ @Override
+ public long getTotalRequestCount() {
+ return regionServer.requestCount.get();
+ }
+
+ @Override
+ public int getCompactionQueueSize() {
+ return this.regionServer.compactSplitThread.getCompactionQueueSize();
+ }
+
+ @Override
+ public int getFlushQueueSize() {
+ return this.regionServer.cacheFlusher.getFlushQueueSize();
+ }
+
+ @Override
+ public long getBlockCacheCount() {
+ return this.blockCache.size();
+ }
+
+ @Override
+ public long getBlockCacheSize() {
+ return this.blockCache.getCurrentSize();
+ }
+
+ @Override
+ public long getBlockCacheFreeSize() {
+ return this.blockCache.getFreeSize();
+ }
+
+ @Override
+ public long getBlockCacheHitCount() {
+ return this.cacheStats.getHitCount();
+ }
+
+ @Override
+ public long getBlockCacheMissCount() {
+ return this.cacheStats.getMissCount();
+ }
+
+ @Override
+ public long getBlockCacheEvictedCount() {
+ return this.cacheStats.getEvictedCount();
+ }
+
+ @Override
+ public int getBlockCacheHitPercent() {
+ return (int) (this.cacheStats.getHitRatio() * 100);
+ }
+
+ @Override
+ public int getBlockCacheHitCachingPercent() {
+ return (int) (blockCache.getStats().getHitCachingRatio() * 100);
+ }
+
+  @Override
+  public void forceRecompute() {
+    this.runnable.run();
+  }
+
+ @Override
+ public long getNumStores() {
+ return numStores;
+ }
+
+ @Override
+ public long getNumStoreFiles() {
+ return numStoreFiles;
+ }
+
+ @Override
+ public long getMemstoreSize() {
+ return memstoreSize;
+ }
+
+ @Override
+ public long getStoreFileSize() {
+ return storeFileSize;
+ }
+
+  @Override
+  public double getRequestsPerSecond() {
+    return requestsPerSecond;
+  }
+
+ @Override
+ public long getReadRequestsCount() {
+ return readRequestsCount;
+ }
+
+ @Override
+ public long getWriteRequestsCount() {
+ return writeRequestsCount;
+ }
+
+ @Override
+ public long getCheckAndMutateChecksFailed() {
+ return checkAndMutateChecksFailed;
+ }
+
+ @Override
+ public long getCheckAndMutateChecksPassed() {
+ return checkAndMutateChecksPassed;
+ }
+
+ @Override
+ public long getStoreFileIndexSize() {
+ return storefileIndexSize;
+ }
+
+ @Override
+ public long getTotalStaticIndexSize() {
+ return totalStaticIndexSize;
+ }
+
+ @Override
+ public long getTotalStaticBloomSize() {
+ return totalStaticBloomSize;
+ }
+
+ @Override
+ public long getNumPutsWithoutWAL() {
+ return numPutsWithoutWAL;
+ }
+
+ @Override
+ public long getDataInMemoryWithoutWAL() {
+ return dataInMemoryWithoutWAL;
+ }
+
+ @Override
+ public int getPercentFileLocal() {
+ return percentFileLocal;
+ }
+
+ @Override
+ public long getUpdatesBlockedTime() {
+ return this.regionServer.cacheFlusher.getUpdatesBlockedMsHighWater().get();
+ }
+
+  /**
+   * The runnable executed on the executor every PERIOD seconds. It takes
+   * metrics/numbers from all of the regions and uses them to compute
+   * point-in-time metrics.
+   */
+ public class RegionServerMetricsWrapperRunnable implements Runnable {
+
+ private long lastRan = 0;
+ private long lastRequestCount = 0;
+
+    @Override
+    public synchronized void run() {
+
+ cacheStats = blockCache.getStats();
+
+ HDFSBlocksDistribution hdfsBlocksDistribution =
+ new HDFSBlocksDistribution();
+
+ long tempNumStores = 0;
+ long tempNumStoreFiles = 0;
+ long tempMemstoreSize = 0;
+ long tempStoreFileSize = 0;
+ long tempReadRequestsCount = 0;
+ long tempWriteRequestsCount = 0;
+ long tempCheckAndMutateChecksFailed = 0;
+ long tempCheckAndMutateChecksPassed = 0;
+ long tempStorefileIndexSize = 0;
+ long tempTotalStaticIndexSize = 0;
+ long tempTotalStaticBloomSize = 0;
+ long tempNumPutsWithoutWAL = 0;
+ long tempDataInMemoryWithoutWAL = 0;
+ int tempPercentFileLocal = 0;
+
+ for (HRegion r : regionServer.getOnlineRegionsLocalContext()) {
+ tempNumPutsWithoutWAL += r.numPutsWithoutWAL.get();
+ tempDataInMemoryWithoutWAL += r.dataInMemoryWithoutWAL.get();
+ tempReadRequestsCount += r.readRequestsCount.get();
+ tempWriteRequestsCount += r.writeRequestsCount.get();
+ tempCheckAndMutateChecksFailed += r.checkAndMutateChecksFailed.get();
+ tempCheckAndMutateChecksPassed += r.checkAndMutateChecksPassed.get();
+ tempNumStores += r.stores.size();
+ for (Store store : r.stores.values()) {
+ tempNumStoreFiles += store.getStorefilesCount();
+ tempMemstoreSize += store.getMemStoreSize();
+ tempStoreFileSize += store.getStorefilesSize();
+ tempStorefileIndexSize += store.getStorefilesIndexSize();
+ tempTotalStaticBloomSize += store.getTotalStaticBloomSize();
+ tempTotalStaticIndexSize += store.getTotalStaticIndexSize();
+ }
+
+ hdfsBlocksDistribution.add(r.getHDFSBlocksDistribution());
+ }
+
+ float localityIndex = hdfsBlocksDistribution.getBlockLocalityIndex(
+ regionServer.getServerName().getHostname());
+ tempPercentFileLocal = (int) (localityIndex * 100);
+
+ //Compute the number of requests per second
+ long currentTime = EnvironmentEdgeManager.currentTimeMillis();
+
+      // On the first run, assume the executor started PERIOD seconds ago;
+      // that is a guess, but a reasonably good one.
+      if (lastRan == 0) {
+        lastRan = currentTime - (PERIOD * 1000);
+ }
+
+      // Recompute only if the clock has actually advanced; if we've time
+      // traveled (the clock moved backwards), keep the last requests per second.
+      if ((currentTime - lastRan) > 10) {
+        long currentRequestCount = getTotalRequestCount();
+        requestsPerSecond = (currentRequestCount - lastRequestCount) /
+            ((currentTime - lastRan) / 1000.0);
+        lastRequestCount = currentRequestCount;
+      }
+ lastRan = currentTime;
+
+ //Copy over computed values so that no thread sees half computed values.
+ numStores = tempNumStores;
+ numStoreFiles = tempNumStoreFiles;
+ memstoreSize = tempMemstoreSize;
+ storeFileSize = tempStoreFileSize;
+ readRequestsCount = tempReadRequestsCount;
+ writeRequestsCount = tempWriteRequestsCount;
+ checkAndMutateChecksFailed = tempCheckAndMutateChecksFailed;
+ checkAndMutateChecksPassed = tempCheckAndMutateChecksPassed;
+ storefileIndexSize = tempStorefileIndexSize;
+ totalStaticIndexSize = tempTotalStaticIndexSize;
+ totalStaticBloomSize = tempTotalStaticBloomSize;
+ numPutsWithoutWAL = tempNumPutsWithoutWAL;
+ dataInMemoryWithoutWAL = tempDataInMemoryWithoutWAL;
+ percentFileLocal = tempPercentFileLocal;
+ }
+ }
+
+}
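
The wrapper above follows a snapshot pattern worth calling out: a single scheduled task does the expensive walk over every online region, accumulates into locals, and then publishes each result with one volatile write, so metrics readers never block on the computation and never observe a torn 64-bit value. Each field is individually consistent, but the set of fields is not published atomically; a reader may see numStores from one pass and storeFileSize from the next. A stand-alone sketch of the same pattern, outside HBase:

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    public class SnapshotGauge {
      private static final int PERIOD = 15; // seconds, as in the wrapper above

      // Written only by the scheduled task; volatile makes each 64-bit write
      // atomic and immediately visible to reader threads (e.g. JMX getters).
      private volatile long aggregate;

      private final ScheduledExecutorService executor =
          Executors.newSingleThreadScheduledExecutor();

      public SnapshotGauge() {
        executor.scheduleWithFixedDelay(new Runnable() {
          @Override
          public void run() {
            long temp = 0;
            // ... walk the live data structures, accumulating into temp ...
            aggregate = temp; // one volatile write publishes the snapshot
          }
        }, PERIOD, PERIOD, TimeUnit.SECONDS);
      }

      public long getAggregate() {
        return aggregate; // cheap, non-blocking read from any thread
      }
    }
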
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapperImpl.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapperImpl.java
new file mode 100644
index 0000000..125cef2
--- /dev/null
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapperImpl.java
@@ -0,0 +1,111 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.regionserver;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
+import org.apache.hadoop.metrics2.MetricsExecutor;
+
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * Implementation of MetricsRegionWrapper that periodically recomputes the
+ * per-region storefile, memstore, and size numbers exposed to metrics.
+ */
+@InterfaceAudience.Private
+public class MetricsRegionWrapperImpl implements MetricsRegionWrapper {
+
+ public static final int PERIOD = 45;
+
+ private final HRegion region;
+ private ScheduledExecutorService executor;
+ private Runnable runnable;
+  private volatile long numStoreFiles;
+  private volatile long memstoreSize;
+  private volatile long storeFileSize;
+
+ public MetricsRegionWrapperImpl(HRegion region) {
+ this.region = region;
+ this.executor = CompatibilitySingletonFactory.getInstance(MetricsExecutor.class).getExecutor();
+    this.runnable = new HRegionMetricsWrapperRunnable();
+ this.executor.scheduleWithFixedDelay(this.runnable, PERIOD, PERIOD, TimeUnit.SECONDS);
+ }
+
+ @Override
+ public String getTableName() {
+ return this.region.getTableDesc().getNameAsString();
+ }
+
+ @Override
+ public String getRegionName() {
+ return this.region.getRegionInfo().getEncodedName();
+ }
+
+ @Override
+ public long getNumStores() {
+ return this.region.stores.size();
+ }
+
+ @Override
+ public long getNumStoreFiles() {
+ return numStoreFiles;
+ }
+
+ @Override
+ public long getMemstoreSize() {
+ return memstoreSize;
+ }
+
+ @Override
+ public long getStoreFileSize() {
+ return storeFileSize;
+ }
+
+ @Override
+ public long getReadRequestCount() {
+ return this.region.getReadRequestsCount();
+ }
+
+ @Override
+ public long getWriteRequestCount() {
+ return this.region.getWriteRequestsCount();
+ }
+
+  public class HRegionMetricsWrapperRunnable implements Runnable {
+
+ @Override
+ public void run() {
+ long tempNumStoreFiles = 0;
+ long tempMemstoreSize = 0;
+ long tempStoreFileSize = 0;
+
+ for (Store store : region.stores.values()) {
+ tempNumStoreFiles += store.getStorefilesCount();
+ tempMemstoreSize += store.getMemStoreSize();
+ tempStoreFileSize += store.getStorefilesSize();
+ }
+
+ numStoreFiles = tempNumStoreFiles;
+ memstoreSize = tempMemstoreSize;
+ storeFileSize = tempStoreFileSize;
+ }
+ }
+
+}
\ No newline at end of file
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitRequest.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitRequest.java
index f945ffd..0b5a316 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitRequest.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitRequest.java
@@ -66,7 +66,6 @@ class SplitRequest implements Runnable {
if (!st.prepare()) return;
try {
st.execute(this.server, this.server);
- this.server.getMetrics().incrementSplitSuccessCount(System.currentTimeMillis() - startTime);
} catch (Exception e) {
if (this.server.isStopping() || this.server.isStopped()) {
LOG.info(
@@ -81,7 +80,6 @@ class SplitRequest implements Runnable {
if (st.rollback(this.server, this.server)) {
LOG.info("Successful rollback of failed split of " +
parent.getRegionNameAsString());
- this.server.getMetrics().incrementSplitFailureCount();
} else {
this.server.abort("Abort; we got an error after point-of-no-return");
}
@@ -102,7 +100,6 @@ class SplitRequest implements Runnable {
} catch (IOException ex) {
LOG.error("Split failed " + this, RemoteExceptionHandler
.checkIOException(ex));
- this.server.getMetrics().incrementSplitFailureCount();
server.checkFileSystem();
} finally {
if (this.parent.getCoprocessorHost() != null) {
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
index d391a16..1841eeb 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
@@ -32,7 +32,6 @@ import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoder;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionProgress;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
-import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics.SchemaAware;
import com.google.common.collect.ImmutableList;
@@ -42,7 +41,7 @@ import com.google.common.collect.ImmutableList;
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
-public interface Store extends SchemaAware, HeapSize {
+public interface Store extends HeapSize {
/* The default priority for user-specified compaction requests.
* The user gets top priority unless we have blocking compactions. (Pri <= 0)
@@ -287,4 +286,8 @@ public interface Store extends SchemaAware, HeapSize {
* @return the parent region hosting this store
*/
public HRegion getHRegion();
+
+ public String getColumnFamilyName();
+
+ public String getTableName();
}
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
index af1225a..b1418fc 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
@@ -58,8 +58,6 @@ import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.io.hfile.HFileScanner;
import org.apache.hadoop.hbase.io.hfile.HFileWriterV1;
import org.apache.hadoop.hbase.io.hfile.HFileWriterV2;
-import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics;
-import org.apache.hadoop.hbase.regionserver.metrics.SchemaConfigured;
import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoder;
import org.apache.hadoop.hbase.io.hfile.NoOpDataBlockEncoder;
import org.apache.hadoop.hbase.util.ChecksumType;
@@ -80,7 +78,7 @@ import com.google.common.collect.Ordering;
/**
* A Store data file. Stores usually have one or more of these files. They
* are produced by flushing the memstore to disk. To
- * create, instantiate a writer using {@link StoreFile#WriterBuilder}
+ * create, instantiate a writer using {@link StoreFile.WriterBuilder}
* and append data. Be sure to add any metadata before calling close on the
* Writer (Use the appendMetadata convenience methods). On close, a StoreFile
* is sitting in the Filesystem. To refer to it, create a StoreFile instance
@@ -91,7 +89,7 @@ import com.google.common.collect.Ordering;
* writer and a reader is that we write once but read a lot more.
*/
@InterfaceAudience.LimitedPrivate("Coprocessor")
-public class StoreFile extends SchemaConfigured {
+public class StoreFile {
static final Log LOG = LogFactory.getLog(StoreFile.class.getName());
public static enum BloomType {
@@ -277,7 +275,6 @@ public class StoreFile extends SchemaConfigured {
this.modificationTimeStamp = 0;
}
- SchemaMetrics.configureGlobally(conf);
}
/**
@@ -545,11 +542,6 @@ public class StoreFile extends SchemaConfigured {
dataBlockEncoder.getEncodingInCache());
}
- if (isSchemaConfigured()) {
- SchemaConfigured.resetSchemaMetricsConf(reader);
- passSchemaMetricsTo(reader);
- }
-
computeHDFSBlockDistribution();
// Load up indices and fileinfo. This also loads Bloom filter type.
@@ -1287,7 +1279,7 @@ public class StoreFile extends SchemaConfigured {
/**
* Reader for a StoreFile.
*/
- public static class Reader extends SchemaConfigured {
+ public static class Reader {
static final Log LOG = LogFactory.getLog(Reader.class.getName());
protected BloomFilter generalBloomFilter = null;
@@ -1301,7 +1293,6 @@ public class StoreFile extends SchemaConfigured {
public Reader(FileSystem fs, Path path, CacheConfig cacheConf,
DataBlockEncoding preferredEncodingInCache) throws IOException {
- super(path);
reader = HFile.createReaderWithEncoding(fs, path, cacheConf,
preferredEncodingInCache);
bloomFilterType = BloomType.NONE;
@@ -1310,7 +1301,6 @@ public class StoreFile extends SchemaConfigured {
public Reader(FileSystem fs, Path path, HFileLink hfileLink, long size,
CacheConfig cacheConf, DataBlockEncoding preferredEncodingInCache,
boolean closeIStream) throws IOException {
- super(path);
FSDataInputStream in = hfileLink.open(fs);
FSDataInputStream inNoChecksum = in;
@@ -1584,7 +1574,6 @@ public class StoreFile extends SchemaConfigured {
&& bloomFilter.contains(key, 0, key.length, bloom);
}
- getSchemaMetrics().updateBloomMetrics(exists);
return exists;
}
} catch (IOException e) {
@@ -1728,10 +1717,6 @@ public class StoreFile extends SchemaConfigured {
return reader.indexSize();
}
- public String getColumnFamilyName() {
- return reader.getColumnFamilyName();
- }
-
public BloomType getBloomFilterType() {
return this.bloomFilterType;
}
@@ -1774,11 +1759,6 @@ public class StoreFile extends SchemaConfigured {
public long getMaxTimestamp() {
return timeRangeTracker.maximumTimestamp;
}
-
- @Override
- public void schemaConfigurationChanged() {
- passSchemaMetricsTo((SchemaConfigured) reader);
- }
}
/**
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
index b595c06..4ccdbcc 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
@@ -33,8 +33,6 @@ import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.regionserver.HStore.ScanInfo;
-import org.apache.hadoop.hbase.regionserver.metrics.RegionMetricsStorage;
-import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
@@ -110,7 +108,6 @@ public class StoreScanner extends NonLazyKeyValueScanner
throws IOException {
this(store, scan.getCacheBlocks(), scan, columns, scanInfo.getTtl(),
scanInfo.getMinVersions());
- initializeMetricNames();
if (columns != null && scan.isRaw()) {
throw new DoNotRetryIOException(
"Cannot specify any column for a raw scan");
@@ -163,7 +160,6 @@ public class StoreScanner extends NonLazyKeyValueScanner
long smallestReadPoint, long earliestPutTs) throws IOException {
this(store, false, scan, null, scanInfo.getTtl(),
scanInfo.getMinVersions());
- initializeMetricNames();
matcher = new ScanQueryMatcher(scan, scanInfo, null, scanType,
smallestReadPoint, earliestPutTs, oldestUnexpiredTS);
@@ -194,7 +190,6 @@ public class StoreScanner extends NonLazyKeyValueScanner
throws IOException {
this(null, scan.getCacheBlocks(), scan, columns, scanInfo.getTtl(),
scanInfo.getMinVersions());
- this.initializeMetricNames();
this.matcher = new ScanQueryMatcher(scan, scanInfo, columns, scanType,
Long.MAX_VALUE, earliestPutTs, oldestUnexpiredTS);
@@ -206,23 +201,6 @@ public class StoreScanner extends NonLazyKeyValueScanner
}
/**
- * Method used internally to initialize metric names throughout the
- * constructors.
- *
- * To be called after the store variable has been initialized!
- */
- private void initializeMetricNames() {
- String tableName = SchemaMetrics.UNKNOWN;
- String family = SchemaMetrics.UNKNOWN;
- if (store != null) {
- tableName = store.getTableName();
- family = Bytes.toString(store.getFamily().getName());
- }
- this.metricNamePrefix =
- SchemaMetrics.generateSchemaMetricsPrefix(tableName, family);
- }
-
- /**
* Get a filtered list of scanners. Assumes we are not in a compaction.
* @return list of scanners to seek
*/
@@ -458,8 +436,7 @@ public class StoreScanner extends NonLazyKeyValueScanner
}
} finally {
if (cumulativeMetric > 0 && metric != null) {
- RegionMetricsStorage.incrNumericMetric(this.metricNamePrefix + metric,
- cumulativeMetric);
+
}
}
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequest.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequest.java
index bee9668..80e4d5e 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequest.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequest.java
@@ -253,7 +253,6 @@ public class CompactionRequest implements Comparable<CompactionRequest>,
LOG.info(((completed) ? "completed" : "aborted") + " compaction: " +
this + "; duration=" + StringUtils.formatTimeDiff(now, start));
if (completed) {
- server.getMetrics().addCompaction(now - start, this.totalSize);
// degenerate case: blocked regions require recursive enqueues
if (s.getCompactPriority() <= 0) {
server.compactSplitThread
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/OperationMetrics.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/OperationMetrics.java
deleted file mode 100644
index bff18c3..0000000
--- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/OperationMetrics.java
+++ /dev/null
@@ -1,225 +0,0 @@
-/*
- * Copyright The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.hadoop.hbase.regionserver.metrics;
-
-import java.util.Set;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.client.Append;
-import org.apache.hadoop.hbase.client.Delete;
-import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.HTable;
-import org.apache.hadoop.hbase.client.Increment;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.util.Bytes;
-
-/**
- * This class provides a simplified interface to expose time varying metrics
- * about GET/DELETE/PUT/ICV operations on a region and on Column Families. All
- * metrics are stored in {@link RegionMetricsStorage} and exposed to hadoop
- * metrics through {@link RegionServerDynamicMetrics}.
- */
-@InterfaceAudience.Private
-public class OperationMetrics {
-
- private static final String DELETE_KEY = "delete_";
- private static final String PUT_KEY = "put_";
- private static final String GET_KEY = "get_";
- private static final String ICV_KEY = "incrementColumnValue_";
- private static final String INCREMENT_KEY = "increment_";
- private static final String MULTIPUT_KEY = "multiput_";
- private static final String MULTIDELETE_KEY = "multidelete_";
- private static final String APPEND_KEY = "append_";
-
- /** Conf key controlling whether we should expose metrics.*/
- private static final String CONF_KEY =
- "hbase.metrics.exposeOperationTimes";
-
- private String tableName = null;
- private String regionName = null;
- private String regionMetrixPrefix = null;
- private Configuration conf = null;
-
-
- /**
- * Create a new OperationMetrics
- * @param conf The Configuration of the HRegion reporting operations coming in.
- * @param regionInfo The region info
- */
- public OperationMetrics(Configuration conf, HRegionInfo regionInfo) {
- // Configure SchemaMetrics before trying to create a RegionOperationMetrics instance as
- // RegionOperationMetrics relies on SchemaMetrics to do naming.
- if (conf != null) {
- SchemaMetrics.configureGlobally(conf);
-
- this.conf = conf;
- if (regionInfo != null) {
- this.tableName = regionInfo.getTableNameAsString();
- this.regionName = regionInfo.getEncodedName();
- } else {
- this.tableName = SchemaMetrics.UNKNOWN;
- this.regionName = SchemaMetrics.UNKNOWN;
- }
- this.regionMetrixPrefix =
- SchemaMetrics.generateRegionMetricsPrefix(this.tableName, this.regionName);
- }
- }
-
- /**
- * This is used in creating a testing HRegion where the regionInfo is unknown
- * @param conf
- */
- public OperationMetrics() {
- this(null, null);
- }
-
-
- /**
- * Update the stats associated with {@link HTable#put(java.util.List)}.
- *
- * @param columnFamilies Set of CF's this multiput is associated with
- * @param value the time
- */
-  public void updateMultiPutMetrics(Set<byte[]> columnFamilies, long value) {
- doUpdateTimeVarying(columnFamilies, MULTIPUT_KEY, value);
- }
-
- /**
- * Update the stats associated with {@link HTable#delete(java.util.List)}.
- *
- * @param columnFamilies Set of CF's this multidelete is associated with
- * @param value the time
- */
-  public void updateMultiDeleteMetrics(Set<byte[]> columnFamilies, long value) {
- doUpdateTimeVarying(columnFamilies, MULTIDELETE_KEY, value);
- }
-
- /**
- * Update the metrics associated with a {@link Get}
- *
- * @param columnFamilies
- * Set of Column Families in this get.
- * @param value
- * the time
- */
-  public void updateGetMetrics(Set<byte[]> columnFamilies, long value) {
- doUpdateTimeVarying(columnFamilies, GET_KEY, value);
- }
-
- /**
- * Update metrics associated with an {@link Increment}
- * @param columnFamilies
- * @param value
- */
-  public void updateIncrementMetrics(Set<byte[]> columnFamilies, long value) {
- doUpdateTimeVarying(columnFamilies, INCREMENT_KEY, value);
- }
-
-
- /**
- * Update the metrics associated with an {@link Append}
- * @param columnFamilies
- * @param value
- */
-  public void updateAppendMetrics(Set<byte[]> columnFamilies, long value) {
- doUpdateTimeVarying(columnFamilies, APPEND_KEY, value);
- }
-
-
- /**
- * Update the metrics associated with
- * {@link HTable#incrementColumnValue(byte[], byte[], byte[], long)}
- *
- * @param columnFamily
- * The single column family associated with an ICV
- * @param value
- * the time
- */
- public void updateIncrementColumnValueMetrics(byte[] columnFamily, long value) {
- String cfMetricPrefix =
- SchemaMetrics.generateSchemaMetricsPrefix(this.tableName, Bytes.toString(columnFamily));
- doSafeIncTimeVarying(cfMetricPrefix, ICV_KEY, value);
- doSafeIncTimeVarying(this.regionMetrixPrefix, ICV_KEY, value);
- }
-
- /**
- * update metrics associated with a {@link Put}
- *
- * @param columnFamilies
- * Set of column families involved.
- * @param value
- * the time.
- */
-  public void updatePutMetrics(Set<byte[]> columnFamilies, long value) {
- doUpdateTimeVarying(columnFamilies, PUT_KEY, value);
- }
-
- /**
- * update metrics associated with a {@link Delete}
- *
- * @param columnFamilies
- * @param value
- * the time.
- */
-  public void updateDeleteMetrics(Set<byte[]> columnFamilies, long value) {
- doUpdateTimeVarying(columnFamilies, DELETE_KEY, value);
- }
-
- /**
- * This deletes all old metrics this instance has ever created or updated.
- */
- public void closeMetrics() {
- RegionMetricsStorage.clear();
- }
-
- /**
- * Method to send updates for cf and region metrics. This is the normal method
- * used if the naming of stats and CF's are in line with put/delete/multiput.
- *
- * @param columnFamilies
- * the set of column families involved.
- * @param key
- * the metric name.
- * @param value
- * the time.
- */
-  private void doUpdateTimeVarying(Set<byte[]> columnFamilies, String key, long value) {
- String cfPrefix = null;
- if (columnFamilies != null) {
- cfPrefix = SchemaMetrics.generateSchemaMetricsPrefix(tableName, columnFamilies);
- } else {
- cfPrefix = SchemaMetrics.generateSchemaMetricsPrefix(tableName, SchemaMetrics.UNKNOWN);
- }
-
- doSafeIncTimeVarying(cfPrefix, key, value);
- doSafeIncTimeVarying(this.regionMetrixPrefix, key, value);
- }
-
- private void doSafeIncTimeVarying(String prefix, String key, long value) {
- if (conf.getBoolean(CONF_KEY, true)) {
- if (prefix != null && !prefix.isEmpty() && key != null && !key.isEmpty()) {
- String m = prefix + key;
- RegionMetricsStorage.incrTimeVaryingMetric(m, value);
- }
- }
- }
-
-}
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionMetricsStorage.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionMetricsStorage.java
deleted file mode 100644
index 5d4beff..0000000
--- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionMetricsStorage.java
+++ /dev/null
@@ -1,138 +0,0 @@
-/*
- * Copyright The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.hadoop.hbase.regionserver.metrics;
-
-import java.util.Map;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ConcurrentMap;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.atomic.AtomicLong;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.util.Pair;
-
-/**
- * This class if for maintaining the maps used to power metrics for hfiles,
- * regions, and regionservers. It has methods to mutate and get state of metrics
- * numbers. These numbers are exposed to Hadoop metrics through
- * RegionServerDynamicMetrics.
- */
-@InterfaceAudience.Private
-public class RegionMetricsStorage {
-
- // for simple numeric metrics (# of blocks read from block cache)
-  private static final ConcurrentMap<String, AtomicLong> numericMetrics =
-      new ConcurrentHashMap<String, AtomicLong>();
-
- // for simple numeric metrics (current block cache size)
- // These ones are not reset to zero when queried, unlike the previous.
-  private static final ConcurrentMap<String, AtomicLong> numericPersistentMetrics =
-      new ConcurrentHashMap<String, AtomicLong>();
-
- /**
- * Used for metrics where we want track a metrics (such as latency) over a
- * number of operations.
- */
-  private static final ConcurrentMap<String, Pair<AtomicLong, AtomicInteger>> timeVaryingMetrics =
-      new ConcurrentHashMap<String, Pair<AtomicLong, AtomicInteger>>();
-
-  public static Map<String, AtomicLong> getNumericMetrics() {
- return numericMetrics;
- }
-
-  public static Map<String, AtomicLong> getNumericPersistentMetrics() {
- return numericPersistentMetrics;
- }
-
-  public static Map<String, Pair<AtomicLong, AtomicInteger>> getTimeVaryingMetrics() {
- return timeVaryingMetrics;
- }
-
- public static void incrNumericMetric(String key, long amount) {
- AtomicLong oldVal = numericMetrics.get(key);
- if (oldVal == null) {
- oldVal = numericMetrics.putIfAbsent(key, new AtomicLong(amount));
- if (oldVal == null)
- return;
- }
- oldVal.addAndGet(amount);
- }
-
- public static void incrTimeVaryingMetric(String key, long amount) {
-    Pair<AtomicLong, AtomicInteger> oldVal = timeVaryingMetrics.get(key);
- if (oldVal == null) {
-      oldVal =
-          timeVaryingMetrics.putIfAbsent(key,
-              new Pair<AtomicLong, AtomicInteger>(
-                  new AtomicLong(amount),
-                  new AtomicInteger(1)));
- if (oldVal == null)
- return;
- }
- oldVal.getFirst().addAndGet(amount); // total time
- oldVal.getSecond().incrementAndGet(); // increment ops by 1
- }
-
- public static void incrNumericPersistentMetric(String key, long amount) {
- AtomicLong oldVal = numericPersistentMetrics.get(key);
- if (oldVal == null) {
- oldVal = numericPersistentMetrics.putIfAbsent(key, new AtomicLong(amount));
- if (oldVal == null)
- return;
- }
- oldVal.addAndGet(amount);
- }
-
- public static void setNumericMetric(String key, long amount) {
- numericMetrics.put(key, new AtomicLong(amount));
- }
-
- public static long getNumericMetric(String key) {
- AtomicLong m = numericMetrics.get(key);
- if (m == null)
- return 0;
- return m.get();
- }
-
-  public static Pair<Long, Integer> getTimeVaryingMetric(String key) {
-    Pair<AtomicLong, AtomicInteger> pair = timeVaryingMetrics.get(key);
-    if (pair == null) {
-      return new Pair<Long, Integer>(0L, 0);
-    }
-
-    return new Pair<Long, Integer>(pair.getFirst().get(), pair.getSecond().get());
- }
-
- public static long getNumericPersistentMetric(String key) {
- AtomicLong m = numericPersistentMetrics.get(key);
- if (m == null)
- return 0;
- return m.get();
- }
-
- /**
- * Clear all copies of the metrics this stores.
- */
- public static void clear() {
- timeVaryingMetrics.clear();
- numericMetrics.clear();
- numericPersistentMetrics.clear();
- }
-}
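
Although RegionMetricsStorage is deleted here, its incr* methods share a lock-free create-or-update idiom that is easy to misread: the new counter is seeded with the delta, and a null return from putIfAbsent means that seed won the race, so only the losing thread falls through to addAndGet. The same idiom in isolation (a reference sketch, not HBase code):

    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.ConcurrentMap;
    import java.util.concurrent.atomic.AtomicLong;

    public class CounterMap {
      private final ConcurrentMap<String, AtomicLong> counters =
          new ConcurrentHashMap<String, AtomicLong>();

      public void increment(String key, long amount) {
        AtomicLong counter = counters.get(key);
        if (counter == null) {
          // Seed a fresh counter with the delta. putIfAbsent returns null
          // only if our value was installed, so the amount is already counted.
          counter = counters.putIfAbsent(key, new AtomicLong(amount));
          if (counter == null) {
            return;
          }
          // Another thread installed a counter first; fall through and add
          // our amount to the winner.
        }
        counter.addAndGet(amount);
      }
    }
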
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerDynamicMetrics.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerDynamicMetrics.java
deleted file mode 100644
index bb06a10..0000000
--- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerDynamicMetrics.java
+++ /dev/null
@@ -1,230 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.regionserver.metrics;
-
-import java.lang.reflect.Field;
-import java.lang.reflect.Method;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.concurrent.atomic.AtomicLong;
-import java.util.concurrent.atomic.AtomicInteger;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.regionserver.HRegion;
-import org.apache.hadoop.hbase.util.Pair;
-import org.apache.hadoop.metrics.MetricsContext;
-import org.apache.hadoop.metrics.MetricsRecord;
-import org.apache.hadoop.metrics.MetricsUtil;
-import org.apache.hadoop.metrics.Updater;
-import org.apache.hadoop.metrics.util.MetricsBase;
-import org.apache.hadoop.metrics.util.MetricsLongValue;
-import org.apache.hadoop.metrics.util.MetricsRegistry;
-import org.apache.hadoop.metrics.util.MetricsTimeVaryingRate;
-
-/**
- *
- * This class is for maintaining the various RPC statistics
- * and publishing them through the metrics interfaces.
- * This also registers the JMX MBean for RPC.
- *
- * This class has a number of metrics variables that are publicly accessible;
- * these variables (objects) have methods to update their values;
- * for example: rpcQueueTime.inc(time)
- *
- */
-@InterfaceAudience.Private
-public class RegionServerDynamicMetrics implements Updater {
- private static final String UNABLE_TO_CLEAR = "Unable to clear RegionServerDynamicMetrics";
-
- private MetricsRecord metricsRecord;
- private MetricsContext context;
- private final RegionServerDynamicStatistics rsDynamicStatistics;
- private Method updateMbeanInfoIfMetricsListChanged = null;
- private static final Log LOG =
- LogFactory.getLog(RegionServerDynamicStatistics.class);
-
- private boolean reflectionInitialized = false;
- private boolean needsUpdateMessage = false;
- private Field recordMetricMapField;
- private Field registryMetricMapField;
-
- /**
- * The metrics variables are public:
- * - they can be set directly by calling their set/inc methods
- * -they can also be read directly - e.g. JMX does this.
- */
- public final MetricsRegistry registry = new MetricsRegistry();
-
- private RegionServerDynamicMetrics() {
- this.context = MetricsUtil.getContext("hbase-dynamic");
- this.metricsRecord = MetricsUtil.createRecord(
- this.context,
- "RegionServerDynamicStatistics");
- context.registerUpdater(this);
- this.rsDynamicStatistics = new RegionServerDynamicStatistics(this.registry);
- try {
- updateMbeanInfoIfMetricsListChanged =
- this.rsDynamicStatistics.getClass().getSuperclass()
- .getDeclaredMethod("updateMbeanInfoIfMetricsListChanged",
- new Class[]{});
- updateMbeanInfoIfMetricsListChanged.setAccessible(true);
- } catch (Exception e) {
- LOG.error(e);
- }
- }
-
- public static RegionServerDynamicMetrics newInstance() {
- RegionServerDynamicMetrics metrics =
- new RegionServerDynamicMetrics();
- return metrics;
- }
-
- public synchronized void setNumericMetric(String name, long amt) {
- MetricsLongValue m = (MetricsLongValue)registry.get(name);
- if (m == null) {
- m = new MetricsLongValue(name, this.registry);
- this.needsUpdateMessage = true;
- }
- m.set(amt);
- }
-
- public synchronized void incrTimeVaryingMetric(
- String name,
- long amt,
- int numOps) {
- MetricsTimeVaryingRate m = (MetricsTimeVaryingRate)registry.get(name);
- if (m == null) {
- m = new MetricsTimeVaryingRate(name, this.registry);
- this.needsUpdateMessage = true;
- }
- if (numOps > 0) {
- m.inc(numOps, amt);
- }
- }
-
- /**
- * Clear all metrics this exposes.
- * Uses reflection to clear them from hadoop metrics side as well.
- */
- @SuppressWarnings("rawtypes")
- public void clear() {
- this.needsUpdateMessage = true;
- // If this is the first clear use reflection to get the two maps that hold copies of our
- // metrics on the hadoop metrics side. We have to use reflection because there is not
- // remove metrics on the hadoop side. If we can't get them then clearing old metrics
- // is not possible and bailing out early is our best option.
- if (!this.reflectionInitialized) {
- this.reflectionInitialized = true;
- try {
- this.recordMetricMapField = this.metricsRecord.getClass().getDeclaredField("metricTable");
- this.recordMetricMapField.setAccessible(true);
- } catch (SecurityException e) {
- LOG.debug(UNABLE_TO_CLEAR);
- return;
- } catch (NoSuchFieldException e) {
- LOG.debug(UNABLE_TO_CLEAR);
- return;
- }
-
- try {
- this.registryMetricMapField = this.registry.getClass().getDeclaredField("metricsList");
- this.registryMetricMapField.setAccessible(true);
- } catch (SecurityException e) {
- LOG.debug(UNABLE_TO_CLEAR);
- return;
- } catch (NoSuchFieldException e) {
- LOG.debug(UNABLE_TO_CLEAR);
- return;
- }
- }
-
-
- //If we found both fields then try and clear the maps.
- if (this.recordMetricMapField != null && this.registryMetricMapField != null) {
- try {
- Map recordMap = (Map) this.recordMetricMapField.get(this.metricsRecord);
- recordMap.clear();
- Map registryMap = (Map) this.registryMetricMapField.get(this.registry);
- registryMap.clear();
- } catch (IllegalArgumentException e) {
- LOG.debug(UNABLE_TO_CLEAR);
- } catch (IllegalAccessException e) {
- LOG.debug(UNABLE_TO_CLEAR);
- }
- } else {
- LOG.debug(UNABLE_TO_CLEAR);
- }
- }
-
- /**
- * Push the metrics to the monitoring subsystem on doUpdate() call.
- * @param context ctx
- */
- public void doUpdates(MetricsContext context) {
- /* get dynamically created numeric metrics, and push the metrics */
-    for (Entry<String, AtomicLong> entry : RegionMetricsStorage.getNumericMetrics().entrySet()) {
- this.setNumericMetric(entry.getKey(), entry.getValue().getAndSet(0));
- }
- /* get dynamically created numeric metrics, and push the metrics.
- * These ones aren't to be reset; they are cumulative. */
-    for (Entry<String, AtomicLong> entry : RegionMetricsStorage.getNumericPersistentMetrics().entrySet()) {
- this.setNumericMetric(entry.getKey(), entry.getValue().get());
- }
- /* get dynamically created time varying metrics, and push the metrics */
-    for (Entry<String, Pair<AtomicLong, AtomicInteger>> entry :
-        RegionMetricsStorage.getTimeVaryingMetrics().entrySet()) {
-      Pair<AtomicLong, AtomicInteger> value = entry.getValue();
- this.incrTimeVaryingMetric(entry.getKey(),
- value.getFirst().getAndSet(0),
- value.getSecond().getAndSet(0));
- }
-
- // If there are new metrics sending this message to jmx tells it to update everything.
- // This is not ideal we should just move to metrics2 that has full support for dynamic metrics.
- if (needsUpdateMessage) {
- try {
- if (updateMbeanInfoIfMetricsListChanged != null) {
- updateMbeanInfoIfMetricsListChanged.invoke(this.rsDynamicStatistics,
- new Object[]{});
- }
- } catch (Exception e) {
- LOG.error(e);
- }
- needsUpdateMessage = false;
- }
-
-
- synchronized (registry) {
- // Iterate through the registry to propagate the different rpc metrics.
- for (String metricName : registry.getKeyList() ) {
- MetricsBase value = registry.get(metricName);
- value.pushMetric(metricsRecord);
- }
- }
- metricsRecord.update();
- }
-
- public void shutdown() {
- if (rsDynamicStatistics != null)
- rsDynamicStatistics.shutdown();
- }
-}
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerDynamicStatistics.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerDynamicStatistics.java
deleted file mode 100644
index b4df6a7..0000000
--- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerDynamicStatistics.java
+++ /dev/null
@@ -1,48 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.regionserver.metrics;
-
-import javax.management.ObjectName;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.metrics.MetricsMBeanBase;
-import org.apache.hadoop.metrics.util.MBeanUtil;
-import org.apache.hadoop.metrics.util.MetricsRegistry;
-
-/**
- * Exports dynamic region server metric recorded in
- * {@link RegionServerDynamicMetrics} as an MBean
- * for JMX monitoring.
- */
-@InterfaceAudience.Private
-public class RegionServerDynamicStatistics extends MetricsMBeanBase {
- private final ObjectName mbeanName;
-
- public RegionServerDynamicStatistics(MetricsRegistry registry) {
- super(registry, "RegionServerDynamicStatistics");
- mbeanName = MBeanUtil.registerMBean("RegionServer", "RegionServerDynamicStatistics", this);
- }
-
- public void shutdown() {
- if (mbeanName != null)
- MBeanUtil.unregisterMBean(mbeanName);
- }
-
-}
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerMetrics.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerMetrics.java
deleted file mode 100644
index d8883e9..0000000
--- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerMetrics.java
+++ /dev/null
@@ -1,626 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.regionserver.metrics;
-
-import java.io.IOException;
-import java.lang.management.ManagementFactory;
-import java.lang.management.MemoryUsage;
-import java.util.List;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.io.hfile.HFile;
-import org.apache.hadoop.hbase.metrics.ExactCounterMetric;
-import org.apache.hadoop.hbase.metrics.HBaseInfo;
-import org.apache.hadoop.hbase.metrics.MetricsRate;
-import org.apache.hadoop.hbase.metrics.histogram.MetricsHistogram;
-import org.apache.hadoop.hbase.metrics.PersistentMetricsTimeVaryingRate;
-import com.yammer.metrics.stats.Snapshot;
-import org.apache.hadoop.hbase.regionserver.wal.HLogMetrics;
-import org.apache.hadoop.hbase.util.Pair;
-import org.apache.hadoop.hbase.util.Strings;
-import org.apache.hadoop.metrics.ContextFactory;
-import org.apache.hadoop.metrics.MetricsContext;
-import org.apache.hadoop.metrics.MetricsRecord;
-import org.apache.hadoop.metrics.MetricsUtil;
-import org.apache.hadoop.metrics.Updater;
-import org.apache.hadoop.metrics.jvm.JvmMetrics;
-import org.apache.hadoop.metrics.util.MetricsIntValue;
-import org.apache.hadoop.metrics.util.MetricsLongValue;
-import org.apache.hadoop.metrics.util.MetricsRegistry;
-import org.apache.hadoop.metrics.util.MetricsTimeVaryingRate;
-import org.apache.hadoop.metrics.util.MetricsTimeVaryingLong;
-import org.apache.hadoop.util.StringUtils;
-
-/**
- * This class is for maintaining the various regionserver statistics
- * and publishing them through the metrics interfaces.
- *
- * This class has a number of metrics variables that are publicly accessible;
- * these variables (objects) have methods to update their values.
- */
-@InterfaceAudience.Private
-public class RegionServerMetrics implements Updater {
- @SuppressWarnings({"FieldCanBeLocal"})
- private final Log LOG = LogFactory.getLog(this.getClass());
- private final MetricsRecord metricsRecord;
- private long lastUpdate = System.currentTimeMillis();
- private long lastExtUpdate = System.currentTimeMillis();
- private long extendedPeriod = 0;
- private static final int MB = 1024*1024;
- private MetricsRegistry registry = new MetricsRegistry();
- private final RegionServerStatistics statistics;
-
- public final MetricsTimeVaryingRate atomicIncrementTime =
- new MetricsTimeVaryingRate("atomicIncrementTime", registry);
-
- /**
- * Count of regions carried by this regionserver
- */
- public final MetricsIntValue regions =
- new MetricsIntValue("regions", registry);
-
- /**
- * Block cache size.
- */
- public final MetricsLongValue blockCacheSize =
- new MetricsLongValue("blockCacheSize", registry);
-
- /**
- * Block cache free size.
- */
- public final MetricsLongValue blockCacheFree =
- new MetricsLongValue("blockCacheFree", registry);
-
- /**
- * Block cache item count.
- */
- public final MetricsLongValue blockCacheCount =
- new MetricsLongValue("blockCacheCount", registry);
-
- /**
- * Block cache hit count.
- */
- public final MetricsLongValue blockCacheHitCount =
- new MetricsLongValue("blockCacheHitCount", registry);
-
- /**
- * Block cache miss count.
- */
- public final MetricsLongValue blockCacheMissCount =
- new MetricsLongValue("blockCacheMissCount", registry);
-
- /**
- * Block cache evict count.
- */
- public final MetricsLongValue blockCacheEvictedCount =
- new MetricsLongValue("blockCacheEvictedCount", registry);
-
- /**
- * Block hit ratio.
- */
- public final MetricsIntValue blockCacheHitRatio =
- new MetricsIntValue("blockCacheHitRatio", registry);
-
- /**
- * Block hit caching ratio. This only includes the requests to the block
- * cache where caching was turned on. See HBASE-2253.
- */
- public final MetricsIntValue blockCacheHitCachingRatio =
- new MetricsIntValue("blockCacheHitCachingRatio", registry);
-
- /** Block hit ratio for past N periods. */
- public final MetricsIntValue blockCacheHitRatioPastNPeriods = new MetricsIntValue("blockCacheHitRatioPastNPeriods", registry);
-
- /** Block hit caching ratio for past N periods */
- public final MetricsIntValue blockCacheHitCachingRatioPastNPeriods = new MetricsIntValue("blockCacheHitCachingRatioPastNPeriods", registry);
-
- /*
- * Count of requests to the regionservers since last call to metrics update
- */
- public final MetricsRate requests = new MetricsRate("requests", registry);
-
- /**
- * Count of stores open on the regionserver.
- */
- public final MetricsIntValue stores = new MetricsIntValue("stores", registry);
-
- /**
- * Count of storefiles open on the regionserver.
- */
- public final MetricsIntValue storefiles =
- new MetricsIntValue("storefiles", registry);
-
- /**
- * Count of read requests
- */
- public final MetricsLongValue readRequestsCount =
- new MetricsLongValue("readRequestsCount", registry);
-
- /**
- * Count of write requests
- */
- public final MetricsLongValue writeRequestsCount =
- new MetricsLongValue("writeRequestsCount", registry);
-
- /**
- * Count of checkAndMutates the failed the check
- */
- public final MetricsLongValue checkAndMutateChecksFailed =
- new MetricsLongValue("checkAndMutateChecksFailed", registry);
-
- /**
- * Count of checkAndMutates that passed the check
- */
- public final MetricsLongValue checkAndMutateChecksPassed =
- new MetricsLongValue("checkAndMutateChecksPassed", registry);
- /**
- */
- public final MetricsIntValue storefileIndexSizeMB =
- new MetricsIntValue("storefileIndexSizeMB", registry);
-
- /** The total size of block index root levels in this regionserver in KB. */
- public final MetricsIntValue rootIndexSizeKB =
- new MetricsIntValue("rootIndexSizeKB", registry);
-
- /** Total size of all block indexes (not necessarily loaded in memory) */
- public final MetricsIntValue totalStaticIndexSizeKB =
- new MetricsIntValue("totalStaticIndexSizeKB", registry);
-
- /** Total size of all Bloom filters (not necessarily loaded in memory) */
- public final MetricsIntValue totalStaticBloomSizeKB =
- new MetricsIntValue("totalStaticBloomSizeKB", registry);
-
- /**
- * HDFS blocks locality index
- */
- public final MetricsIntValue hdfsBlocksLocalityIndex =
- new MetricsIntValue("hdfsBlocksLocalityIndex", registry);
-
- /**
- * Sum of all the memstore sizes in this regionserver in MB
- */
- public final MetricsIntValue memstoreSizeMB =
- new MetricsIntValue("memstoreSizeMB", registry);
-
- /**
- * Number of put with WAL disabled in this regionserver in MB
- */
- public final MetricsLongValue numPutsWithoutWAL =
- new MetricsLongValue("numPutsWithoutWAL", registry);
-
- /**
- * Possible data loss sizes (due to put with WAL disabled) in this regionserver in MB
- */
- public final MetricsIntValue mbInMemoryWithoutWAL =
- new MetricsIntValue("mbInMemoryWithoutWAL", registry);
-
- /**
- * Size of the compaction queue.
- */
- public final MetricsIntValue compactionQueueSize =
- new MetricsIntValue("compactionQueueSize", registry);
-
- /**
- * Size of the flush queue.
- */
- public final MetricsIntValue flushQueueSize =
- new MetricsIntValue("flushQueueSize", registry);
-
- /**
- * filesystem sequential read latency distribution
- */
- public final MetricsHistogram fsReadLatencyHistogram =
- new MetricsHistogram("fsReadLatencyHistogram", registry);
-
- /**
- * filesystem pread latency distribution
- */
- public final MetricsHistogram fsPreadLatencyHistogram =
- new MetricsHistogram("fsPreadLatencyHistogram", registry);
-
- /**
- * Metrics on the distribution of filesystem write latencies (improved version of fsWriteLatency)
- */
- public final MetricsHistogram fsWriteLatencyHistogram =
- new MetricsHistogram("fsWriteLatencyHistogram", registry);
-
-
- /**
- * filesystem read latency
- */
- public final MetricsTimeVaryingRate fsReadLatency =
- new MetricsTimeVaryingRate("fsReadLatency", registry);
-
- /**
- * filesystem positional read latency
- */
- public final MetricsTimeVaryingRate fsPreadLatency =
- new MetricsTimeVaryingRate("fsPreadLatency", registry);
-
- /**
- * filesystem write latency
- */
- public final MetricsTimeVaryingRate fsWriteLatency =
- new MetricsTimeVaryingRate("fsWriteLatency", registry);
-
- /**
- * size (in bytes) of data in HLog append calls
- */
- public final MetricsTimeVaryingRate fsWriteSize =
- new MetricsTimeVaryingRate("fsWriteSize", registry);
-
- /**
- * filesystem sync latency
- */
- public final MetricsTimeVaryingRate fsSyncLatency =
- new MetricsTimeVaryingRate("fsSyncLatency", registry);
-
-
- /**
- * time each scheduled compaction takes
- */
- protected final MetricsHistogram compactionTime =
- new MetricsHistogram("compactionTime", registry);
-
- /**
- * total size, in bytes, of the storefiles in each scheduled compaction
- */
- protected final MetricsHistogram compactionSize =
- new MetricsHistogram("compactionSize", registry);
-
- /**
- * time each scheduled flush takes
- */
- protected final MetricsHistogram flushTime =
- new MetricsHistogram("flushTime", registry);
-
- /**
- * size, in bytes, of each scheduled flush
- */
- protected final MetricsHistogram flushSize =
- new MetricsHistogram("flushSize", registry);
-
- /**
- * Count of HLog appends that were judged slow
- */
- public final MetricsLongValue slowHLogAppendCount =
- new MetricsLongValue("slowHLogAppendCount", registry);
-
- /**
- * time taken by HLog appends that were judged slow
- */
- public final MetricsTimeVaryingRate slowHLogAppendTime =
- new MetricsTimeVaryingRate("slowHLogAppendTime", registry);
-
- /**
- * Count (and time taken) of successful region splits
- */
- public final PersistentMetricsTimeVaryingRate regionSplitSuccessCount =
- new PersistentMetricsTimeVaryingRate("regionSplitSuccessCount", registry);
-
- /**
- * Count of failed region splits
- */
- public final MetricsLongValue regionSplitFailureCount =
- new MetricsLongValue("regionSplitFailureCount", registry);
-
- /**
- * Number of times checksum verification failed.
- */
- public final MetricsLongValue checksumFailuresCount =
- new MetricsLongValue("checksumFailuresCount", registry);
-
- /**
- * time updates were blocked while waiting on regionserver resources
- */
- public final MetricsHistogram updatesBlockedSeconds = new MetricsHistogram(
- "updatesBlockedSeconds", registry);
-
- /**
- * time updates were blocked because the memstore reached its high-water mark
- */
- public final MetricsHistogram updatesBlockedSecondsHighWater = new MetricsHistogram(
- "updatesBlockedSecondsHighWater", registry);
-
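All of the fields above follow the same two-phase lifecycle: the hot path updates a metric as events happen, and the periodic doUpdates() callback below pushes the accumulated values into the shared MetricsRecord. A minimal sketch of that lifecycle, assuming the MetricsRate and MetricsHistogram types used in this file and the hadoop metrics-v1 record API; the class, method, and values are illustrative only:

    import org.apache.hadoop.hbase.metrics.MetricsRate;
    import org.apache.hadoop.hbase.metrics.histogram.MetricsHistogram;
    import org.apache.hadoop.metrics.MetricsRecord;
    import org.apache.hadoop.metrics.util.MetricsRegistry;

    final class MetricsLifecycleSketch {
      private final MetricsRegistry registry = new MetricsRegistry();
      private final MetricsHistogram flushTime = new MetricsHistogram("flushTime", registry);
      private final MetricsRate requests = new MetricsRate("requests", registry);

      // Hot path: record observations as they happen.
      void onFlush(long millis) { flushTime.update(millis); }
      void onRequests(int count) { requests.inc(count); }

      // Periodic path (what doUpdates() does): publish into the metrics record.
      void publish(MetricsRecord record) {
        flushTime.pushMetric(record);  // emits count, min, max, percentiles
        requests.pushMetric(record);   // converts the raw count into a per-second rate
        record.update();               // flush the record to the configured sink
      }
    }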
- public RegionServerMetrics() {
- MetricsContext context = MetricsUtil.getContext("hbase");
- metricsRecord = MetricsUtil.createRecord(context, "regionserver");
- String name = Thread.currentThread().getName();
- metricsRecord.setTag("RegionServer", name);
- context.registerUpdater(this);
- // Add jvmmetrics.
- JvmMetrics.init("RegionServer", name);
- // Add Hbase Info metrics
- HBaseInfo.init();
-
- // export for JMX
- statistics = new RegionServerStatistics(this.registry, name);
-
- // get custom attributes
- try {
- Object m = ContextFactory.getFactory().getAttribute("hbase.extendedperiod");
- if (m instanceof String) {
- this.extendedPeriod = Long.parseLong((String) m)*1000;
- }
- } catch (IOException ioe) {
- LOG.info("Couldn't load ContextFactory for Metrics config info");
- }
-
- LOG.info("Initialized");
- }
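The constructor wires this object into hadoop's metrics-v1 push model: registerUpdater() hands the context a callback, and the context then invokes doUpdates() once per configured period. A stripped-down sketch of that contract, assuming the metrics-v1 API used above; the record name, metric name, and period are illustrative:

    import org.apache.hadoop.metrics.MetricsContext;
    import org.apache.hadoop.metrics.MetricsRecord;
    import org.apache.hadoop.metrics.MetricsUtil;
    import org.apache.hadoop.metrics.Updater;

    final class UpdaterSketch implements Updater {
      private final MetricsRecord record;

      UpdaterSketch() {
        MetricsContext context = MetricsUtil.getContext("hbase");
        record = MetricsUtil.createRecord(context, "regionserver");
        // From here on the context calls doUpdates() once per period
        // (configured in hadoop-metrics.properties, e.g. hbase.period=10).
        context.registerUpdater(this);
      }

      @Override
      public void doUpdates(MetricsContext unused) {
        record.setMetric("exampleGauge", 1);  // hypothetical metric name
        record.update();                      // ship the snapshot to the sink
      }
    }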
-
- public void shutdown() {
- if (statistics != null) {
- statistics.shutdown();
- }
- }
-
- /**
- * Since this object is a registered updater, this method will be called
- * periodically, e.g. every 5 seconds.
- * @param caller the metrics context that is responsible for calling us
- */
- public void doUpdates(MetricsContext caller) {
- synchronized (this) {
- this.lastUpdate = System.currentTimeMillis();
-
- // has the extended period for long-living stats elapsed?
- if (this.extendedPeriod > 0 &&
- this.lastUpdate - this.lastExtUpdate >= this.extendedPeriod) {
- this.lastExtUpdate = this.lastUpdate;
- this.compactionTime.clear();
- this.compactionSize.clear();
- this.flushTime.clear();
- this.flushSize.clear();
- this.resetAllMinMax();
- }
-
- this.stores.pushMetric(this.metricsRecord);
- this.storefiles.pushMetric(this.metricsRecord);
- this.storefileIndexSizeMB.pushMetric(this.metricsRecord);
- this.rootIndexSizeKB.pushMetric(this.metricsRecord);
- this.totalStaticIndexSizeKB.pushMetric(this.metricsRecord);
- this.totalStaticBloomSizeKB.pushMetric(this.metricsRecord);
- this.memstoreSizeMB.pushMetric(this.metricsRecord);
- this.mbInMemoryWithoutWAL.pushMetric(this.metricsRecord);
- this.numPutsWithoutWAL.pushMetric(this.metricsRecord);
- this.readRequestsCount.pushMetric(this.metricsRecord);
- this.writeRequestsCount.pushMetric(this.metricsRecord);
- this.regions.pushMetric(this.metricsRecord);
- this.requests.pushMetric(this.metricsRecord);
- this.compactionQueueSize.pushMetric(this.metricsRecord);
- this.flushQueueSize.pushMetric(this.metricsRecord);
- this.blockCacheSize.pushMetric(this.metricsRecord);
- this.blockCacheFree.pushMetric(this.metricsRecord);
- this.blockCacheCount.pushMetric(this.metricsRecord);
- this.blockCacheHitCount.pushMetric(this.metricsRecord);
- this.blockCacheMissCount.pushMetric(this.metricsRecord);
- this.blockCacheEvictedCount.pushMetric(this.metricsRecord);
- this.blockCacheHitRatio.pushMetric(this.metricsRecord);
- this.blockCacheHitCachingRatio.pushMetric(this.metricsRecord);
- this.hdfsBlocksLocalityIndex.pushMetric(this.metricsRecord);
- this.blockCacheHitRatioPastNPeriods.pushMetric(this.metricsRecord);
- this.blockCacheHitCachingRatioPastNPeriods.pushMetric(this.metricsRecord);
-
- // Mix in HFile and HLog metrics
- // Be careful. Here is code for MTVR from up in hadoop:
- // public synchronized void inc(final int numOps, final long time) {
- // currentData.numOperations += numOps;
- // currentData.time += time;
- // long timePerOps = time/numOps;
- // minMax.update(timePerOps);
- // }
- // This means you can't pass a numOps of zero, or you'll get an ArithmeticException (/ by zero).
- // HLog metrics
- addHLogMetric(HLogMetrics.getWriteTime(), this.fsWriteLatency);
- addHLogMetric(HLogMetrics.getWriteSize(), this.fsWriteSize);
- addHLogMetric(HLogMetrics.getSyncTime(), this.fsSyncLatency);
- addHLogMetric(HLogMetrics.getSlowAppendTime(), this.slowHLogAppendTime);
- this.slowHLogAppendCount.set(HLogMetrics.getSlowAppendCount());
- // HFile metrics, sequential reads
- int ops = HFile.getReadOps();
- if (ops != 0) this.fsReadLatency.inc(ops, HFile.getReadTimeMs());
- // HFile metrics, positional reads
- ops = HFile.getPreadOps();
- if (ops != 0) this.fsPreadLatency.inc(ops, HFile.getPreadTimeMs());
- this.checksumFailuresCount.set(HFile.getChecksumFailuresCount());
-
- /* NOTE: removed HFile write latency. 2 reasons:
- * 1) HLog latencies are a far higher priority since they're
- * on-demand, while HFile writes happen in the background (compact/flush)
- * 2) HFile write metrics are already handled at a higher level
- * by the compaction & flush metrics.
- */
-
- for(Long latency : HFile.getReadLatenciesNanos()) {
- this.fsReadLatencyHistogram.update(latency);
- }
- for(Long latency : HFile.getPreadLatenciesNanos()) {
- this.fsPreadLatencyHistogram.update(latency);
- }
- for(Long latency : HFile.getWriteLatenciesNanos()) {
- this.fsWriteLatencyHistogram.update(latency);
- }
-
-
- // push the result
- this.fsPreadLatency.pushMetric(this.metricsRecord);
- this.fsReadLatency.pushMetric(this.metricsRecord);
- this.fsWriteLatency.pushMetric(this.metricsRecord);
- this.fsWriteSize.pushMetric(this.metricsRecord);
-
- this.fsReadLatencyHistogram.pushMetric(this.metricsRecord);
- this.fsWriteLatencyHistogram.pushMetric(this.metricsRecord);
- this.fsPreadLatencyHistogram.pushMetric(this.metricsRecord);
-
- this.fsSyncLatency.pushMetric(this.metricsRecord);
- this.compactionTime.pushMetric(this.metricsRecord);
- this.compactionSize.pushMetric(this.metricsRecord);
- this.flushTime.pushMetric(this.metricsRecord);
- this.flushSize.pushMetric(this.metricsRecord);
- this.slowHLogAppendCount.pushMetric(this.metricsRecord);
- this.regionSplitSuccessCount.pushMetric(this.metricsRecord);
- this.regionSplitFailureCount.pushMetric(this.metricsRecord);
- this.checksumFailuresCount.pushMetric(this.metricsRecord);
- this.updatesBlockedSeconds.pushMetric(this.metricsRecord);
- this.updatesBlockedSecondsHighWater.pushMetric(this.metricsRecord);
- }
- this.metricsRecord.update();
- }
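The `if (ops != 0)` guards above are load-bearing: as the inlined MTVR code notes, MetricsTimeVaryingRate.inc(numOps, time) divides time by numOps internally, so pushing an empty interval would throw. A minimal sketch of that guard pattern; the helper class and method names are hypothetical:

    import org.apache.hadoop.metrics.util.MetricsTimeVaryingRate;

    final class MtvrGuard {
      // inc(numOps, time) computes time/numOps internally, so numOps == 0
      // would raise ArithmeticException: / by zero. Skip empty intervals.
      static void safeInc(MetricsTimeVaryingRate rate, int numOps, long timeMs) {
        if (numOps > 0) {
          rate.inc(numOps, timeMs);
        }
      }
    }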
-
- private void addHLogMetric(HLogMetrics.Metric logMetric,
- MetricsTimeVaryingRate hadoopMetric) {
- if (logMetric.count > 0) {
- hadoopMetric.inc(logMetric.min);
- }
- if (logMetric.count > 1) {
- hadoopMetric.inc(logMetric.max);
- }
- if (logMetric.count > 2) {
- int ops = logMetric.count - 2;
- hadoopMetric.inc(ops, logMetric.total - logMetric.max - logMetric.min);
- }
- }
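To see what addHLogMetric() is doing, take a hypothetical interval with count = 5 appends, total = 30 ms, min = 2 ms, max = 10 ms. Feeding min and max as single ops keeps MTVR's min/max tracking exact; only the middle ops get averaged. A worked sketch with those assumed numbers:

    import org.apache.hadoop.metrics.util.MetricsRegistry;
    import org.apache.hadoop.metrics.util.MetricsTimeVaryingRate;

    final class AddHLogMetricExample {
      public static void main(String[] args) {
        MetricsTimeVaryingRate rate =
            new MetricsTimeVaryingRate("fsWriteLatency", new MetricsRegistry());
        rate.inc(2);      // the true minimum, as a single op
        rate.inc(10);     // the true maximum, as a single op
        rate.inc(3, 18);  // remaining 5-2=3 ops share 30-2-10=18 ms (6 ms each)
        // Net effect: 5 ops and 30 ms total, with exact min and max; only the
        // distribution of the middle three ops is flattened to their mean.
      }
    }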
-
- public void resetAllMinMax() {
- this.atomicIncrementTime.resetMinMax();
- this.fsReadLatency.resetMinMax();
- this.fsWriteLatency.resetMinMax();
- this.fsWriteSize.resetMinMax();
- this.fsSyncLatency.resetMinMax();
- this.slowHLogAppendTime.resetMinMax();
- }
-
- /**
- * @return Rate of requests, per second, over the previous metrics interval.
- */
- public float getRequests() {
- return this.requests.getPreviousIntervalValue();
- }
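Note that getPreviousIntervalValue() reports a rate, not a raw count: MetricsRate divides whatever was inc()'d by the elapsed interval when pushMetric() closes it. A sketch of those semantics, assuming the MetricsRate API used in this file; the sleep and values are illustrative:

    import org.apache.hadoop.hbase.metrics.MetricsRate;
    import org.apache.hadoop.metrics.MetricsContext;
    import org.apache.hadoop.metrics.MetricsRecord;
    import org.apache.hadoop.metrics.MetricsUtil;
    import org.apache.hadoop.metrics.util.MetricsRegistry;

    final class RequestRateSketch {
      public static void main(String[] args) throws InterruptedException {
        MetricsContext context = MetricsUtil.getContext("hbase");
        MetricsRecord record = MetricsUtil.createRecord(context, "regionserver");
        MetricsRate requests = new MetricsRate("requests", new MetricsRegistry());

        requests.inc(100);            // 100 requests observed this interval
        Thread.sleep(5000);           // pretend one 5 s metrics period elapses
        requests.pushMetric(record);  // divides by elapsed seconds, then resets

        float perSecond = requests.getPreviousIntervalValue();  // ~20.0 req/s
        System.out.println(perSecond);
      }
    }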
-
- /**
- * @param time time the compaction took
- * @param size total size, in bytes, of the storefiles in the compaction
- */
- public synchronized void addCompaction(long time, long size) {
- this.compactionTime.update(time);
- this.compactionSize.update(size);
- }
-
- /**
- * @param flushes history in