diff --git hbase-hadoop-compat/pom.xml hbase-hadoop-compat/pom.xml index d136664..be98522 100644 --- hbase-hadoop-compat/pom.xml +++ hbase-hadoop-compat/pom.xml @@ -58,11 +58,20 @@ + commons-logging commons-logging + + com.google.inject + guice + + + com.google.inject.extensions + guice-assistedinject + diff --git hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/CompatibilitySingletonFactory.java hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/CompatibilitySingletonFactory.java deleted file mode 100644 index 7fbf518..0000000 --- hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/CompatibilitySingletonFactory.java +++ /dev/null @@ -1,83 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.master.metrics.MasterMetricsSource; - -import java.util.HashMap; -import java.util.Iterator; -import java.util.Map; -import java.util.ServiceLoader; - -/** - * Factory for classes supplied by hadoop compatibility modules. 
- */ -public class CompatibilitySingletonFactory { - private static final Log LOG = LogFactory.getLog(CompatibilitySingletonFactory.class); - public static final String EXCEPTION_START = "Could not create "; - public static final String EXCEPTION_END = " Is the hadoop compatibility jar on the classpath?"; - - private static final Map instances = new HashMap(); - - /** - * Get the singleton instance of Any classes defined by compatibiliy jar's - * - * @return the singleton - */ - public static synchronized T getInstance(Class klass) { - T instance = (T) instances.get(klass); - if (instance == null) { - try { - ServiceLoader loader = ServiceLoader.load(klass); - Iterator it = loader.iterator(); - instance = it.next(); - if (it.hasNext()) { - StringBuilder msg = new StringBuilder(); - msg.append("ServiceLoader provided more than one implementation for class: ") - .append(klass) - .append(", using implementation: ").append(instance.getClass()) - .append(", other implementations: {"); - while (it.hasNext()) { - msg.append(it.next()).append(" "); - } - msg.append("}"); - LOG.warn(msg); - } - } catch (Exception e) { - throw new RuntimeException(createExceptionString(klass), e); - } catch (Error e) { - throw new RuntimeException(createExceptionString(klass), e); - } - - // If there was nothing returned and no exception then throw an exception. 
- if (instance == null) { - throw new RuntimeException(createExceptionString(klass)); - } - instances.put(klass, instance); - } - return instance; - } - - private static String createExceptionString(Class klass) { - return EXCEPTION_START + klass.toString() + EXCEPTION_END; - } - -} diff --git hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/HadoopCompatPlugin.java hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/HadoopCompatPlugin.java new file mode 100644 index 0000000..9e60697 --- /dev/null +++ hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/HadoopCompatPlugin.java @@ -0,0 +1,35 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase; + +import com.google.inject.Module; + +import java.util.Collection; +import java.util.Map; + +/** + * + */ +public interface HadoopCompatPlugin { + + public String getName(); + + public Map getModules(); + +} diff --git hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/HadoopCompatPluginFactory.java hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/HadoopCompatPluginFactory.java new file mode 100644 index 0000000..14e83fa --- /dev/null +++ hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/HadoopCompatPluginFactory.java @@ -0,0 +1,49 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase; + +import com.google.inject.Module; + +import java.util.Collection; +import java.util.HashMap; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.ServiceLoader; + +/** + * + */ +public class HadoopCompatPluginFactory { + + /** + * Get the singleton instance of ReplicationMetricsSource + * + * @return the singleton + */ + public static synchronized Map getModules() { + Map modules = new HashMap(); + for (HadoopCompatPlugin plugin : ServiceLoader.load(HadoopCompatPlugin.class)) { + modules.putAll(plugin.getModules()); + + } + return modules; + } + +} diff --git hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/master/metrics/MasterMetricsSourceFactoryTest.java hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/master/metrics/MasterMetricsSourceFactoryTest.java index f6fbfee..247a73e 100644 --- hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/master/metrics/MasterMetricsSourceFactoryTest.java +++ hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/master/metrics/MasterMetricsSourceFactoryTest.java @@ -18,18 +18,20 @@ package org.apache.hadoop.hbase.master.metrics; -import org.apache.hadoop.hbase.CompatibilitySingletonFactory; +import com.google.inject.Guice; +import org.apache.hadoop.hbase.HadoopCompatPluginFactory; import org.junit.Test; /** - * Test for the CompatibilitySingletonFactory and building MasterMetricsSource + * Test for the Guice and building MasterMetricsSource */ public class MasterMetricsSourceFactoryTest { @Test(expected=RuntimeException.class) public void testGetInstanceNoHadoopCompat() throws Exception { //This should throw an exception because there is no compat lib on the class path. 
- CompatibilitySingletonFactory.getInstance(MasterMetricsSource.class); + Guice.createInjector(HadoopCompatPluginFactory.getModules().values()) + .getInstance(MasterMetricsSource.class); } } diff --git hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/metrics/ReplicationMetricsSourceFactoryTest.java hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/metrics/ReplicationMetricsSourceFactoryTest.java deleted file mode 100644 index 4f567b6..0000000 --- hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/metrics/ReplicationMetricsSourceFactoryTest.java +++ /dev/null @@ -1,34 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.replication.regionserver.metrics; - -import org.apache.hadoop.hbase.CompatibilitySingletonFactory; -import org.junit.Test; - -/** - * Test for the CompatibilitySingletonFactory and building ReplicationMetricsSource - */ -public class ReplicationMetricsSourceFactoryTest { - - @Test(expected=RuntimeException.class) - public void testGetInstanceNoHadoopCompat() throws Exception { - //This should throw an exception because there is no compat lib on the class path. 
- CompatibilitySingletonFactory.getInstance(ReplicationMetricsSource.class); - } -} diff --git hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/metrics/ReplicationMetricsSourceTest.java hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/metrics/ReplicationMetricsSourceTest.java new file mode 100644 index 0000000..715b7af --- /dev/null +++ hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/metrics/ReplicationMetricsSourceTest.java @@ -0,0 +1,39 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.replication.regionserver.metrics; + + +import org.junit.Test; +import com.google.inject.Guice; +import org.apache.hadoop.hbase.HadoopCompatPluginFactory; +import org.junit.Test; + +/** + * Test for the ReplicationMetricsSource where there's no hadoop compat backing classes. + */ +public class ReplicationMetricsSourceTest { + + @Test(expected=RuntimeException.class) + public void testGetInstanceNoHadoopCompat() throws Exception { + //This should throw an exception because there is no compat lib on the class path. 
+ Guice.createInjector(HadoopCompatPluginFactory.getModules().values()) + .getInstance(ReplicationMetricsSource.class); + + } +} diff --git hbase-hadoop1-compat/pom.xml hbase-hadoop1-compat/pom.xml index 9a54c5c..357679d 100644 --- hbase-hadoop1-compat/pom.xml +++ hbase-hadoop1-compat/pom.xml @@ -93,5 +93,13 @@ limitations under the License. true test + + com.google.inject + guice + + + com.google.inject.extensions + guice-assistedinject + diff --git hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/HadoopOneCompatPlugin.java hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/HadoopOneCompatPlugin.java new file mode 100644 index 0000000..ceada85 --- /dev/null +++ hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/HadoopOneCompatPlugin.java @@ -0,0 +1,48 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase; + +import com.google.inject.Module; +import org.apache.hadoop.hbase.master.metrics.MasterMetricsSourceModule; +import org.apache.hadoop.hbase.metrics.MBeanSourceModule; +import org.apache.hadoop.hbase.replication.regionserver.metrics.ReplicationMetricsSourceModule; + +import java.util.HashMap; +import java.util.Map; + +/** + * + */ +public class HadoopOneCompatPlugin implements HadoopCompatPlugin { + + @Override + public String getName() { + return this.getClass().toString(); + } + + @Override + public Map getModules() { + Map modules = new HashMap(); + modules.put("ReplicationMetrics", new ReplicationMetricsSourceModule()); + modules.put("MasterMetricsSource", new MasterMetricsSourceModule()); + modules.put("MBeanSource", new MBeanSourceModule()); + return modules; + + } +} diff --git hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetricsSourceImpl.java hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetricsSourceImpl.java index 3d63ad3..a61b1e2 100644 --- hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetricsSourceImpl.java +++ hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetricsSourceImpl.java @@ -22,18 +22,18 @@ import org.apache.hadoop.hbase.metrics.BaseMetricsSourceImpl; import org.apache.hadoop.metrics2.lib.MetricMutableCounterLong; import org.apache.hadoop.metrics2.lib.MetricMutableGaugeLong; -/** - * Hadoop1 implementation of MasterMetricsSource. - */ +import javax.inject.Inject; + +/** Hadoop1 implementation of MasterMetricsSource. 
*/ public class MasterMetricsSourceImpl - extends BaseMetricsSourceImpl implements MasterMetricsSource { + extends BaseMetricsSourceImpl implements MasterMetricsSource { MetricMutableCounterLong clusterRequestsCounter; MetricMutableGaugeLong ritGauge; MetricMutableGaugeLong ritCountOverThresholdGauge; MetricMutableGaugeLong ritOldestAgeGauge; - + @Inject public MasterMetricsSourceImpl() { this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT); } diff --git hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetricsSourceModule.java hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetricsSourceModule.java new file mode 100644 index 0000000..523eb30 --- /dev/null +++ hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetricsSourceModule.java @@ -0,0 +1,34 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.master.metrics; + +import com.google.inject.AbstractModule; + +import javax.inject.Singleton; + +/** + * + */ +public class MasterMetricsSourceModule extends AbstractModule { + + @Override + protected void configure() { + bind(MasterMetricsSource.class).to(MasterMetricsSourceImpl.class).in(Singleton.class); + } +} diff --git hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseMetricsSourceImpl.java hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseMetricsSourceImpl.java index 5b9537e..0390635 100644 --- hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseMetricsSourceImpl.java +++ hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseMetricsSourceImpl.java @@ -19,23 +19,14 @@ package org.apache.hadoop.hbase.metrics; import org.apache.hadoop.metrics2.MetricsBuilder; -import org.apache.hadoop.metrics2.MetricsException; -import org.apache.hadoop.metrics2.MetricsRecordBuilder; import org.apache.hadoop.metrics2.MetricsSource; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.metrics2.lib.DynamicMetricsRegistry; -import org.apache.hadoop.metrics2.lib.MetricMutable; import org.apache.hadoop.metrics2.lib.MetricMutableCounterLong; import org.apache.hadoop.metrics2.lib.MetricMutableGaugeLong; import org.apache.hadoop.metrics2.source.JvmMetricsSource; -import java.util.Map; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ConcurrentMap; - -/** - * Hadoop 1 implementation of BaseMetricsSource (using metrics2 framework) - */ +/** Hadoop 1 implementation of BaseMetricsSource (using metrics2 framework) */ public class BaseMetricsSourceImpl implements BaseMetricsSource, MetricsSource { private static boolean defaultMetricsSystemInited = false; diff --git hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/metrics/MBeanSourceImpl.java 
hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/metrics/MBeanSourceImpl.java index 2b48247..74a1469 100644 --- hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/metrics/MBeanSourceImpl.java +++ hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/metrics/MBeanSourceImpl.java @@ -22,16 +22,15 @@ import org.apache.hadoop.metrics2.util.MBeans; import javax.management.ObjectName; -/** - * Hadoop1 metrics2 implementation of an object that registers MBeans. - */ +/** Hadoop1 metrics2 implementation of an object that registers MBeans. */ public class MBeanSourceImpl implements MBeanSource { /** * Register an mbean with the underlying metrics system + * * @param serviceName Metrics service/system name * @param metricsName name of the metrics obejct to expose - * @param theMbean the actual MBean + * @param theMbean the actual MBean * @return ObjectName from jmx */ @Override diff --git hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/metrics/MBeanSourceModule.java hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/metrics/MBeanSourceModule.java new file mode 100644 index 0000000..a9980fc --- /dev/null +++ hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/metrics/MBeanSourceModule.java @@ -0,0 +1,32 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.metrics; + +import com.google.inject.AbstractModule; + +/** + * + */ +public class MBeanSourceModule extends AbstractModule { + + @Override + protected void configure() { + bind(MBeanSource.class).to(MBeanSourceImpl.class); + } +} diff --git hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/metrics/ReplicationMetricsSourceImpl.java hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/metrics/ReplicationMetricsSourceImpl.java index 51daedc..bbfbfac 100644 --- hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/metrics/ReplicationMetricsSourceImpl.java +++ hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/metrics/ReplicationMetricsSourceImpl.java @@ -19,7 +19,6 @@ package org.apache.hadoop.hbase.replication.regionserver.metrics; import org.apache.hadoop.hbase.metrics.BaseMetricsSourceImpl; -import org.apache.hadoop.metrics2.MetricsSource; /** * Hadoop1 implementation of ReplicationMetricsSource. 
This provides access to metrics gauges and @@ -28,8 +27,6 @@ import org.apache.hadoop.metrics2.MetricsSource; public class ReplicationMetricsSourceImpl extends BaseMetricsSourceImpl implements ReplicationMetricsSource { - - public ReplicationMetricsSourceImpl() { this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT); } diff --git hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/metrics/ReplicationMetricsSourceModule.java hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/metrics/ReplicationMetricsSourceModule.java new file mode 100644 index 0000000..9d66a33 --- /dev/null +++ hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/metrics/ReplicationMetricsSourceModule.java @@ -0,0 +1,34 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.replication.regionserver.metrics; + +import com.google.inject.AbstractModule; + +import javax.inject.Singleton; + +/** + * + */ +public class ReplicationMetricsSourceModule extends AbstractModule { + + @Override + protected void configure() { + bind(ReplicationMetricsSource.class).to(ReplicationMetricsSourceImpl.class).in(Singleton.class); + } +} diff --git hbase-hadoop1-compat/src/main/java/org/apache/hadoop/metrics2/lib/DynamicMetricsRegistry.java hbase-hadoop1-compat/src/main/java/org/apache/hadoop/metrics2/lib/DynamicMetricsRegistry.java index b980454..10b037a 100644 --- hbase-hadoop1-compat/src/main/java/org/apache/hadoop/metrics2/lib/DynamicMetricsRegistry.java +++ hbase-hadoop1-compat/src/main/java/org/apache/hadoop/metrics2/lib/DynamicMetricsRegistry.java @@ -18,24 +18,23 @@ package org.apache.hadoop.metrics2.lib; +import org.apache.hadoop.metrics2.MetricsException; +import org.apache.hadoop.metrics2.MetricsRecordBuilder; +import org.apache.hadoop.metrics2.MetricsTag; + import java.util.Map.Entry; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; -import org.apache.hadoop.metrics2.MetricsException; -import org.apache.hadoop.metrics2.MetricsRecordBuilder; -import org.apache.hadoop.metrics2.MetricsTag; - /** - * An optional metrics registry class for creating and maintaining a - * collection of MetricsMutables, making writing metrics source easier. - * NOTE: this is a copy of org.apache.hadoop.metrics2.lib.MetricsRegistry with added one - * feature: metrics can be removed. When HADOOP-8313 is fixed, usages of this class - * should be substituted with org.apache.hadoop.metrics2.lib.MetricsRegistry. - * This implementation also provides handy methods for creating metrics dynamically. - * Another difference is that metricsMap & tagsMap implementation is substituted with - * concurrent map, as we allow dynamic metrics additions/removals. 
+ * An optional metrics registry class for creating and maintaining a collection of MetricsMutables, + * making writing metrics source easier. NOTE: this is a copy of org.apache.hadoop.metrics2.lib.MetricsRegistry + * with added one feature: metrics can be removed. When HADOOP-8313 is fixed, usages of this class + * should be substituted with org.apache.hadoop.metrics2.lib.MetricsRegistry. This implementation + * also provides handy methods for creating metrics dynamically. Another difference is that + * metricsMap & tagsMap implementation is substituted with concurrent map, as we allow dynamic + * metrics additions/removals. */ public class DynamicMetricsRegistry { @@ -53,7 +52,8 @@ public class DynamicMetricsRegistry { /** * Construct the registry with a record name - * @param name of the record of the metrics + * + * @param name of the record of the metrics */ public DynamicMetricsRegistry(String name) { this.name = name; @@ -62,7 +62,8 @@ public class DynamicMetricsRegistry { /** * Construct the registry with a name and a metric factory - * @param name of the record of the metrics + * + * @param name of the record of the metrics * @param factory for creating new mutable metrics */ public DynamicMetricsRegistry(String name, MetricMutableFactory factory) { @@ -70,17 +71,16 @@ public class DynamicMetricsRegistry { this.mf = factory; } - /** - * @return the name of the metrics registry - */ + /** @return the name of the metrics registry */ public String name() { return name; } /** * Get a metric by name - * @param name of the metric - * @return the metric object + * + * @param name of the metric + * @return the metric object */ public MetricMutable get(String name) { return metricsMap.get(name); @@ -88,10 +88,11 @@ public class DynamicMetricsRegistry { /** * Create a mutable integer counter - * @param name of the metric + * + * @param name of the metric * @param description of the metric - * @param initValue of the metric - * @return a new counter object + * @param 
initValue of the metric + * @return a new counter object */ public MetricMutableCounterInt newCounter(String name, String description, int initValue) { @@ -101,10 +102,11 @@ public class DynamicMetricsRegistry { /** * Create a mutable long integer counter - * @param name of the metric + * + * @param name of the metric * @param description of the metric - * @param initValue of the metric - * @return a new counter object + * @param initValue of the metric + * @return a new counter object */ public MetricMutableCounterLong newCounter(String name, String description, long initValue) { @@ -114,10 +116,11 @@ public class DynamicMetricsRegistry { /** * Create a mutable integer gauge - * @param name of the metric + * + * @param name of the metric * @param description of the metric - * @param initValue of the metric - * @return a new gauge object + * @param initValue of the metric + * @return a new gauge object */ public MetricMutableGaugeInt newGauge(String name, String description, int initValue) { @@ -127,10 +130,11 @@ public class DynamicMetricsRegistry { /** * Create a mutable long integer gauge - * @param name of the metric + * + * @param name of the metric * @param description of the metric - * @param initValue of the metric - * @return a new gauge object + * @param initValue of the metric + * @return a new gauge object */ public MetricMutableGaugeLong newGauge(String name, String description, long initValue) { @@ -140,12 +144,13 @@ public class DynamicMetricsRegistry { /** * Create a mutable metric with stats - * @param name of the metric + * + * @param name of the metric * @param description of the metric * @param sampleName of the metric (e.g., "ops") * @param valueName of the metric (e.g., "time" or "latency") * @param extended produce extended stat (stdev, min/max etc.) if true. 
- * @return a new metric object + * @return a new metric object */ public MetricMutableStat newStat(String name, String description, String sampleName, String valueName, @@ -157,11 +162,12 @@ public class DynamicMetricsRegistry { /** * Create a mutable metric with stats - * @param name of the metric + * + * @param name of the metric * @param description of the metric * @param sampleName of the metric (e.g., "ops") * @param valueName of the metric (e.g., "time" or "latency") - * @return a new metric object + * @return a new metric object */ public MetricMutableStat newStat(String name, String description, String sampleName, String valueName) { @@ -170,7 +176,8 @@ public class DynamicMetricsRegistry { /** * Create a mutable metric with stats using the name only - * @param name of the metric + * + * @param name of the metric * @return a new metric object */ public MetricMutableStat newStat(String name) { @@ -179,6 +186,7 @@ public class DynamicMetricsRegistry { /** * Set the metrics context tag + * * @param name of the context * @return the registry itself as a convenience */ @@ -188,10 +196,11 @@ public class DynamicMetricsRegistry { /** * Add a tag to the metrics - * @param name of the tag + * + * @param name of the tag * @param description of the tag - * @param value of the tag - * @return the registry (for keep adding tags) + * @param value of the tag + * @return the registry (for keep adding tags) */ public DynamicMetricsRegistry tag(String name, String description, String value) { return tag(name, description, value, false); @@ -199,20 +208,21 @@ public class DynamicMetricsRegistry { /** * Add a tag to the metrics - * @param name of the tag + * + * @param name of the tag * @param description of the tag - * @param value of the tag - * @param override existing tag if true - * @return the registry (for keep adding tags) + * @param value of the tag + * @param override existing tag if true + * @return the registry (for keep adding tags) */ public 
DynamicMetricsRegistry tag(String name, String description, String value, - boolean override) { + boolean override) { MetricsTag tag = new MetricsTag(name, description, value); if (!override) { MetricsTag existing = tagsMap.putIfAbsent(name, tag); if (existing != null) { - throw new MetricsException("Tag "+ name +" already exists!"); + throw new MetricsException("Tag " + name + " already exists!"); } return this; } @@ -224,7 +234,8 @@ public class DynamicMetricsRegistry { /** * Get the tags - * @return the tags set + * + * @return the tags set */ public Set> tags() { return tagsMap.entrySet(); @@ -232,7 +243,8 @@ public class DynamicMetricsRegistry { /** * Get the metrics - * @return the metrics set + * + * @return the metrics set */ public Set> metrics() { return metricsMap.entrySet(); @@ -240,8 +252,9 @@ public class DynamicMetricsRegistry { /** * Sample all the mutable metrics and put the snapshot in the builder + * * @param builder to contain the metrics snapshot - * @param all get all the metrics even if the values are not changed. + * @param all get all the metrics even if the values are not changed. */ public void snapshot(MetricsRecordBuilder builder, boolean all) { for (Entry entry : tags()) { @@ -254,6 +267,7 @@ public class DynamicMetricsRegistry { /** * Removes metric by name + * * @param name name of the metric to remove */ public void removeMetric(String name) { @@ -261,8 +275,7 @@ public class DynamicMetricsRegistry { } /** - * Get a MetricMutableGaugeLong from the storage. If it is not there - * atomically put it. + * Get a MetricMutableGaugeLong from the storage. If it is not there atomically put it. * * @param gaugeName name of the gauge to create or get. * @param potentialStartingValue value of the new counter if we have to create it. @@ -278,9 +291,9 @@ public class DynamicMetricsRegistry { //Create the potential new gauge. 
MetricMutableGaugeLong newGauge = new MetricMutableGaugeLong(gaugeName, "", - potentialStartingValue); + potentialStartingValue); - // Try and put the gauge in. This is atomic. + // Try and put the gauge in. This is atomic. metric = metricsMap.putIfAbsent(gaugeName, newGauge); //If the value we get back is null then the put was successful and we will @@ -293,15 +306,14 @@ public class DynamicMetricsRegistry { if (!(metric instanceof MetricMutableGaugeLong)) { throw new MetricsException("Metric already exists in registry for metric name: " + - name + " and not of type MetricMutableGaugeLong"); + name + " and not of type MetricMutableGaugeLong"); } return (MetricMutableGaugeLong) metric; } /** - * Get a MetricMutableCounterLong from the storage. If it is not there - * atomically put it. + * Get a MetricMutableCounterLong from the storage. If it is not there atomically put it. * * @param counterName Name of the counter to get * @param potentialStartingValue starting value if we have to create a new counter @@ -313,7 +325,7 @@ public class DynamicMetricsRegistry { MetricMutable counter = metricsMap.get(counterName); if (counter == null) { MetricMutableCounterLong newCounter = - new MetricMutableCounterLong(counterName, "", potentialStartingValue); + new MetricMutableCounterLong(counterName, "", potentialStartingValue); counter = metricsMap.putIfAbsent(counterName, newCounter); if (counter == null) { return newCounter; @@ -322,13 +334,13 @@ public class DynamicMetricsRegistry { if (!(counter instanceof MetricMutableCounterLong)) { throw new MetricsException("Metric already exists in registry for metric name: " + - name + "and not of type MetricMutableCounterLong"); + name + "and not of type MetricMutableCounterLong"); } return (MetricMutableCounterLong) counter; } - private T + private T addNewMetricIfAbsent(String name, T ret, Class metricClass) { @@ -343,11 +355,11 @@ public class DynamicMetricsRegistry { return returnExistingWithCast(metric, metricClass, name); } - 
private T returnExistingWithCast(MetricMutable metric, - Class metricClass, String name) { + private T returnExistingWithCast(MetricMutable metric, + Class metricClass, String name) { if (!metricClass.isAssignableFrom(metric.getClass())) { throw new MetricsException("Metric already exists in registry for metric name: " + - name + " and not of type " + metricClass); + name + " and not of type " + metricClass); } return (T) metric; diff --git hbase-hadoop1-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.HadoopCompatPlugin hbase-hadoop1-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.HadoopCompatPlugin new file mode 100644 index 0000000..7946bca --- /dev/null +++ hbase-hadoop1-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.HadoopCompatPlugin @@ -0,0 +1 @@ +org.apache.hadoop.hbase.HadoopOneCompatPlugin \ No newline at end of file diff --git hbase-hadoop1-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.master.metrics.MasterMetricsSource hbase-hadoop1-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.master.metrics.MasterMetricsSource deleted file mode 100644 index fcd2f56..0000000 --- hbase-hadoop1-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.master.metrics.MasterMetricsSource +++ /dev/null @@ -1 +0,0 @@ -org.apache.hadoop.hbase.master.metrics.MasterMetricsSourceImpl diff --git hbase-hadoop1-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.metrics.MBeanSource hbase-hadoop1-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.metrics.MBeanSource deleted file mode 100644 index 48aeafc..0000000 --- hbase-hadoop1-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.metrics.MBeanSource +++ /dev/null @@ -1 +0,0 @@ -org.apache.hadoop.hbase.metrics.MBeanSourceImpl diff --git 
hbase-hadoop1-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.replication.regionserver.metrics.ReplicationMetricsSource hbase-hadoop1-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.replication.regionserver.metrics.ReplicationMetricsSource deleted file mode 100644 index bb64ad5..0000000 --- hbase-hadoop1-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.replication.regionserver.metrics.ReplicationMetricsSource +++ /dev/null @@ -1 +0,0 @@ -org.apache.hadoop.hbase.replication.regionserver.metrics.ReplicationMetricsSourceImpl \ No newline at end of file diff --git hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/master/metrics/MasterMetricsSourceImplTest.java hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/master/metrics/MasterMetricsSourceImplTest.java index 000cf69..10641c8 100644 --- hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/master/metrics/MasterMetricsSourceImplTest.java +++ hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/master/metrics/MasterMetricsSourceImplTest.java @@ -18,23 +18,23 @@ package org.apache.hadoop.hbase.master.metrics; -import org.apache.hadoop.hbase.CompatibilitySingletonFactory; +import com.google.inject.Guice; +import com.google.inject.Injector; +import org.apache.hadoop.hbase.HadoopCompatPluginFactory; import org.junit.Test; import static org.junit.Assert.assertSame; import static org.junit.Assert.assertTrue; -/** - * Test for MasterMetricsSourceImpl - */ +/** Test for MasterMetricsSourceImpl */ public class MasterMetricsSourceImplTest { @Test public void testGetInstance() throws Exception { - MasterMetricsSource rms = CompatibilitySingletonFactory - .getInstance(MasterMetricsSource.class); + Injector inj = Guice.createInjector(HadoopCompatPluginFactory.getModules().values()); + MasterMetricsSource rms = inj.getInstance(MasterMetricsSource.class); assertTrue(rms instanceof MasterMetricsSourceImpl); - assertSame(rms, 
CompatibilitySingletonFactory.getInstance(MasterMetricsSource.class)); + assertSame(rms, inj.getInstance(MasterMetricsSource.class)); } } diff --git hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/metrics/BaseMetricsSourceImplTest.java hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/metrics/BaseMetricsSourceImplTest.java index cdf12fe..e8c0031 100644 --- hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/metrics/BaseMetricsSourceImplTest.java +++ hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/metrics/BaseMetricsSourceImplTest.java @@ -23,13 +23,9 @@ import org.apache.hadoop.metrics2.lib.MetricMutableGaugeLong; import org.junit.BeforeClass; import org.junit.Test; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertSame; +import static org.junit.Assert.*; -/** - * Test of the default BaseMetricsSource implementation for hadoop 1 - */ +/** Test of the default BaseMetricsSource implementation for hadoop 1 */ public class BaseMetricsSourceImplTest { private static BaseMetricsSourceImpl bmsi; diff --git hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/metrics/ReplicationMetricsSourceImplTest.java hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/metrics/ReplicationMetricsSourceImplTest.java index 66385a9..5306dbf 100644 --- hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/metrics/ReplicationMetricsSourceImplTest.java +++ hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/metrics/ReplicationMetricsSourceImplTest.java @@ -18,20 +18,27 @@ package org.apache.hadoop.hbase.replication.regionserver.metrics; -import org.apache.hadoop.hbase.CompatibilitySingletonFactory; +import com.google.inject.Guice; +import org.apache.hadoop.hbase.HadoopCompatPluginFactory; import org.junit.Test; import static org.junit.Assert.assertTrue; 
-/** - * Test to make sure that ReplicationMetricsSourceImpl is hooked up to ServiceLoader - */ +/** Test for ReplicationMetricsSourceImpl */ public class ReplicationMetricsSourceImplTest { + /** + * Make sure that the guice is wired up. + * + * @throws Exception + */ @Test public void testGetInstance() throws Exception { - ReplicationMetricsSource rms = CompatibilitySingletonFactory - .getInstance(ReplicationMetricsSource.class); + + ReplicationMetricsSource rms = + Guice.createInjector(HadoopCompatPluginFactory.getModules().values()) + .getInstance(ReplicationMetricsSource.class); + assertTrue(rms instanceof ReplicationMetricsSourceImpl); } } diff --git hbase-hadoop2-compat/pom.xml hbase-hadoop2-compat/pom.xml index a3c307a..fe045a9 100644 --- hbase-hadoop2-compat/pom.xml +++ hbase-hadoop2-compat/pom.xml @@ -129,5 +129,14 @@ limitations under the License. hadoop-minicluster ${hadoop-two.version} + + + com.google.inject + guice + + + com.google.inject.extensions + guice-assistedinject + diff --git hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/HadoopTwoCompatPlugin.java hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/HadoopTwoCompatPlugin.java new file mode 100644 index 0000000..c63d25c --- /dev/null +++ hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/HadoopTwoCompatPlugin.java @@ -0,0 +1,48 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase; + +import com.google.inject.Module; +import org.apache.hadoop.hbase.master.metrics.MasterMetricsSourceModule; +import org.apache.hadoop.hbase.metrics.MBeanSourceModule; +import org.apache.hadoop.hbase.replication.regionserver.metrics.ReplicationMetricsSourceModule; + +import java.util.HashMap; +import java.util.Map; + +/** + * + */ +public class HadoopTwoCompatPlugin implements HadoopCompatPlugin { + + @Override + public String getName() { + return this.getClass().toString(); + } + + @Override + public Map getModules() { + Map modules = new HashMap(); + modules.put("ReplicationMetrics", new ReplicationMetricsSourceModule()); + modules.put("MasterMetricsSource", new MasterMetricsSourceModule()); + modules.put("MBeanSource", new MBeanSourceModule()); + return modules; + + } +} diff --git hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetricsSourceImpl.java hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetricsSourceImpl.java index 010ed18..afe52eb 100644 --- hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetricsSourceImpl.java +++ hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetricsSourceImpl.java @@ -22,11 +22,9 @@ import org.apache.hadoop.hbase.metrics.BaseMetricsSourceImpl; import org.apache.hadoop.metrics2.lib.MutableCounterLong; import org.apache.hadoop.metrics2.lib.MutableGaugeLong; -/** - * Hadoop2 implementation of MasterMetricsSource. 
- */ +/** Hadoop2 implementation of MasterMetricsSource. */ public class MasterMetricsSourceImpl - extends BaseMetricsSourceImpl implements MasterMetricsSource { + extends BaseMetricsSourceImpl implements MasterMetricsSource { MutableCounterLong clusterRequestsCounter; MutableGaugeLong ritGauge; diff --git hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetricsSourceModule.java hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetricsSourceModule.java new file mode 100644 index 0000000..523eb30 --- /dev/null +++ hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetricsSourceModule.java @@ -0,0 +1,34 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.master.metrics; + +import com.google.inject.AbstractModule; + +import javax.inject.Singleton; + +/** + * + */ +public class MasterMetricsSourceModule extends AbstractModule { + + @Override + protected void configure() { + bind(MasterMetricsSource.class).to(MasterMetricsSourceImpl.class).in(Singleton.class); + } +} diff --git hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseMetricsSourceImpl.java hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseMetricsSourceImpl.java index 85db6d0..c5d5156 100644 --- hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseMetricsSourceImpl.java +++ hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseMetricsSourceImpl.java @@ -26,9 +26,7 @@ import org.apache.hadoop.metrics2.lib.MutableCounterLong; import org.apache.hadoop.metrics2.lib.MutableGaugeLong; import org.apache.hadoop.metrics2.source.JvmMetrics; -/** - * Hadoop 2 implementation of BaseMetricsSource (using metrics2 framework) - */ +/** Hadoop 2 implementation of BaseMetricsSource (using metrics2 framework) */ public class BaseMetricsSourceImpl implements BaseMetricsSource, MetricsSource { private static boolean defaultMetricsSystemInited = false; diff --git hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/metrics/MBeanSourceImpl.java hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/metrics/MBeanSourceImpl.java index 036c9a6..fb96174 100644 --- hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/metrics/MBeanSourceImpl.java +++ hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/metrics/MBeanSourceImpl.java @@ -22,16 +22,15 @@ import org.apache.hadoop.metrics2.util.MBeans; import javax.management.ObjectName; -/** - * Hadoop2 metrics2 implementation of an object that registers MBeans. - */ +/** Hadoop2 metrics2 implementation of an object that registers MBeans. 
*/ public class MBeanSourceImpl implements MBeanSource { /** * Register an mbean with the underlying metrics system + * * @param serviceName Metrics service/system name * @param metricsName name of the metrics obejct to expose - * @param theMbean the actual MBean + * @param theMbean the actual MBean * @return ObjectName from jmx */ @Override diff --git hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/metrics/MBeanSourceModule.java hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/metrics/MBeanSourceModule.java new file mode 100644 index 0000000..a9980fc --- /dev/null +++ hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/metrics/MBeanSourceModule.java @@ -0,0 +1,32 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.metrics; + +import com.google.inject.AbstractModule; + +/** + * + */ +public class MBeanSourceModule extends AbstractModule { + + @Override + protected void configure() { + bind(MBeanSource.class).to(MBeanSourceImpl.class); + } +} diff --git hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/metrics/ReplicationMetricsSourceImpl.java hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/metrics/ReplicationMetricsSourceImpl.java index e7285dd..a240206 100644 --- hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/metrics/ReplicationMetricsSourceImpl.java +++ hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/metrics/ReplicationMetricsSourceImpl.java @@ -19,7 +19,6 @@ package org.apache.hadoop.hbase.replication.regionserver.metrics; import org.apache.hadoop.hbase.metrics.BaseMetricsSourceImpl; -import org.apache.hadoop.metrics2.MetricsSource; /** * Hadoop2 implementation of ReplicationMetricsSource. This provides access to metrics gauges and diff --git hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/metrics/ReplicationMetricsSourceModule.java hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/metrics/ReplicationMetricsSourceModule.java new file mode 100644 index 0000000..9d66a33 --- /dev/null +++ hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/metrics/ReplicationMetricsSourceModule.java @@ -0,0 +1,34 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.replication.regionserver.metrics; + +import com.google.inject.AbstractModule; + +import javax.inject.Singleton; + +/** + * + */ +public class ReplicationMetricsSourceModule extends AbstractModule { + + @Override + protected void configure() { + bind(ReplicationMetricsSource.class).to(ReplicationMetricsSourceImpl.class).in(Singleton.class); + } +} diff --git hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/DynamicMetricsRegistry.java hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/DynamicMetricsRegistry.java index c55ef2d..4fa750e 100644 --- hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/DynamicMetricsRegistry.java +++ hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/DynamicMetricsRegistry.java @@ -18,9 +18,6 @@ package org.apache.hadoop.metrics2.lib; -import java.util.Collection; -import java.util.concurrent.ConcurrentMap; - import com.google.common.base.Objects; import com.google.common.collect.Maps; import org.apache.hadoop.classification.InterfaceAudience; @@ -31,29 +28,32 @@ import org.apache.hadoop.metrics2.MetricsRecordBuilder; import org.apache.hadoop.metrics2.MetricsTag; import org.apache.hadoop.metrics2.impl.MsInfo; +import java.util.Collection; +import java.util.concurrent.ConcurrentMap; + /** - * An optional metrics registry class for creating and maintaining a - * collection of MetricsMutables, making writing metrics source easier. 
- * NOTE: this is a copy of org.apache.hadoop.metrics2.lib.MetricsRegistry with added one - * feature: metrics can be removed. When HADOOP-8313 is fixed, usages of this class - * should be substituted with org.apache.hadoop.metrics2.lib.MetricsRegistry. - * This implementation also provides handy methods for creating metrics - * dynamically. - * Another difference is that metricsMap implementation is substituted with - * thread-safe map, as we allow dynamic metrics additions/removals. + * An optional metrics registry class for creating and maintaining a collection of MetricsMutables, + * making writing metrics source easier. NOTE: this is a copy of org.apache.hadoop.metrics2.lib.MetricsRegistry + * with added one feature: metrics can be removed. When HADOOP-8313 is fixed, usages of this class + * should be substituted with org.apache.hadoop.metrics2.lib.MetricsRegistry. This implementation + * also provides handy methods for creating metrics dynamically. Another difference is that + * metricsMap implementation is substituted with thread-safe map, as we allow dynamic metrics + * additions/removals. 
*/ @InterfaceAudience.Public @InterfaceStability.Evolving public class DynamicMetricsRegistry { + private final ConcurrentMap metricsMap = - Maps.newConcurrentMap(); + Maps.newConcurrentMap(); private final ConcurrentMap tagsMap = - Maps.newConcurrentMap(); + Maps.newConcurrentMap(); private final MetricsInfo metricsInfo; /** * Construct the registry with a record name - * @param name of the record of the metrics + * + * @param name of the record of the metrics */ public DynamicMetricsRegistry(String name) { metricsInfo = Interns.info(name, name); @@ -61,22 +61,22 @@ public class DynamicMetricsRegistry { /** * Construct the registry with a metadata object - * @param info the info object for the metrics record/group + * + * @param info the info object for the metrics record/group */ public DynamicMetricsRegistry(MetricsInfo info) { metricsInfo = info; } - /** - * @return the info object of the metrics registry - */ + /** @return the info object of the metrics registry */ public MetricsInfo info() { return metricsInfo; } /** * Get a metric by name - * @param name of the metric + * + * @param name of the metric * @return the metric object */ public MutableMetric get(String name) { @@ -85,7 +85,8 @@ public class DynamicMetricsRegistry { /** * Get a tag by name - * @param name of the tag + * + * @param name of the tag * @return the tag object */ public MetricsTag getTag(String name) { @@ -94,9 +95,10 @@ public class DynamicMetricsRegistry { /** * Create a mutable integer counter - * @param name of the metric - * @param desc metric description - * @param iVal initial value + * + * @param name of the metric + * @param desc metric description + * @param iVal initial value * @return a new counter object */ public MutableCounterInt newCounter(String name, String desc, int iVal) { @@ -105,8 +107,9 @@ public class DynamicMetricsRegistry { /** * Create a mutable integer counter - * @param info metadata of the metric - * @param iVal initial value + * + * @param info metadata of 
the metric + * @param iVal initial value * @return a new counter object */ public MutableCounterInt newCounter(MetricsInfo info, int iVal) { @@ -116,9 +119,10 @@ public class DynamicMetricsRegistry { /** * Create a mutable long integer counter - * @param name of the metric - * @param desc metric description - * @param iVal initial value + * + * @param name of the metric + * @param desc metric description + * @param iVal initial value * @return a new counter object */ public MutableCounterLong newCounter(String name, String desc, long iVal) { @@ -127,8 +131,9 @@ public class DynamicMetricsRegistry { /** * Create a mutable long integer counter - * @param info metadata of the metric - * @param iVal initial value + * + * @param info metadata of the metric + * @param iVal initial value * @return a new counter object */ public MutableCounterLong newCounter(MetricsInfo info, long iVal) { @@ -138,18 +143,21 @@ public class DynamicMetricsRegistry { /** * Create a mutable integer gauge - * @param name of the metric - * @param desc metric description - * @param iVal initial value + * + * @param name of the metric + * @param desc metric description + * @param iVal initial value * @return a new gauge object */ public MutableGaugeInt newGauge(String name, String desc, int iVal) { return newGauge(Interns.info(name, desc), iVal); } + /** * Create a mutable integer gauge - * @param info metadata of the metric - * @param iVal initial value + * + * @param info metadata of the metric + * @param iVal initial value * @return a new gauge object */ public MutableGaugeInt newGauge(MetricsInfo info, int iVal) { @@ -159,9 +167,10 @@ public class DynamicMetricsRegistry { /** * Create a mutable long integer gauge - * @param name of the metric - * @param desc metric description - * @param iVal initial value + * + * @param name of the metric + * @param desc metric description + * @param iVal initial value * @return a new gauge object */ public MutableGaugeLong newGauge(String name, String desc, 
long iVal) { @@ -170,8 +179,9 @@ public class DynamicMetricsRegistry { /** * Create a mutable long integer gauge - * @param info metadata of the metric - * @param iVal initial value + * + * @param info metadata of the metric + * @param iVal initial value * @return a new gauge object */ public MutableGaugeLong newGauge(MetricsInfo info, long iVal) { @@ -181,15 +191,16 @@ public class DynamicMetricsRegistry { /** * Create a mutable metric with stats - * @param name of the metric - * @param desc metric description - * @param sampleName of the metric (e.g., "Ops") - * @param valueName of the metric (e.g., "Time" or "Latency") - * @param extended produce extended stat (stdev, min/max etc.) if true. + * + * @param name of the metric + * @param desc metric description + * @param sampleName of the metric (e.g., "Ops") + * @param valueName of the metric (e.g., "Time" or "Latency") + * @param extended produce extended stat (stdev, min/max etc.) if true. * @return a new mutable stat metric object */ public MutableStat newStat(String name, String desc, - String sampleName, String valueName, boolean extended) { + String sampleName, String valueName, boolean extended) { MutableStat ret = new MutableStat(name, desc, sampleName, valueName, extended); return addNewMetricIfAbsent(name, ret, MutableStat.class); @@ -197,10 +208,11 @@ public class DynamicMetricsRegistry { /** * Create a mutable metric with stats - * @param name of the metric - * @param desc metric description - * @param sampleName of the metric (e.g., "Ops") - * @param valueName of the metric (e.g., "Time" or "Latency") + * + * @param name of the metric + * @param desc metric description + * @param sampleName of the metric (e.g., "Ops") + * @param valueName of the metric (e.g., "Time" or "Latency") * @return a new mutable metric object */ public MutableStat newStat(String name, String desc, @@ -210,7 +222,8 @@ public class DynamicMetricsRegistry { /** * Create a mutable rate metric - * @param name of the metric + * + * 
@param name of the metric * @return a new mutable metric object */ public MutableRate newRate(String name) { @@ -219,7 +232,8 @@ public class DynamicMetricsRegistry { /** * Create a mutable rate metric - * @param name of the metric + * + * @param name of the metric * @param description of the metric * @return a new mutable rate metric object */ @@ -229,9 +243,10 @@ public class DynamicMetricsRegistry { /** * Create a mutable rate metric (for throughput measurement) - * @param name of the metric - * @param desc description - * @param extended produce extended stat (stdev/min/max etc.) if true + * + * @param name of the metric + * @param desc description + * @param extended produce extended stat (stdev/min/max etc.) if true * @return a new mutable rate metric object */ public MutableRate newRate(String name, String desc, boolean extended) { @@ -240,13 +255,13 @@ public class DynamicMetricsRegistry { @InterfaceAudience.Private public MutableRate newRate(String name, String desc, - boolean extended, boolean returnExisting) { + boolean extended, boolean returnExisting) { if (returnExisting) { MutableMetric rate = metricsMap.get(name); if (rate != null) { if (rate instanceof MutableRate) return (MutableRate) rate; - throw new MetricsException("Unexpected metrics type "+ rate.getClass() - +" for "+ name); + throw new MetricsException("Unexpected metrics type " + rate.getClass() + + " for " + name); } } MutableRate ret = new MutableRate(name, desc, extended); @@ -260,6 +275,7 @@ public class DynamicMetricsRegistry { /** * Add sample to a stat metric by name. 
+ * * @param name of the metric * @param value of the snapshot to add */ @@ -269,12 +285,10 @@ public class DynamicMetricsRegistry { if (m != null) { if (m instanceof MutableStat) { ((MutableStat) m).add(value); + } else { + throw new MetricsException("Unsupported add(value) for metric " + name); } - else { - throw new MetricsException("Unsupported add(value) for metric "+ name); - } - } - else { + } else { metricsMap.put(name, newRate(name)); // default is a rate metric add(name, value); } @@ -282,6 +296,7 @@ public class DynamicMetricsRegistry { /** * Set the metrics context tag + * * @param name of the context * @return the registry itself as a convenience */ @@ -291,9 +306,10 @@ public class DynamicMetricsRegistry { /** * Add a tag to the metrics - * @param name of the tag + * + * @param name of the tag * @param description of the tag - * @param value of the tag + * @param value of the tag * @return the registry (for keep adding tags) */ public DynamicMetricsRegistry tag(String name, String description, String value) { @@ -302,21 +318,23 @@ public class DynamicMetricsRegistry { /** * Add a tag to the metrics - * @param name of the tag + * + * @param name of the tag * @param description of the tag - * @param value of the tag - * @param override existing tag if true + * @param value of the tag + * @param override existing tag if true * @return the registry (for keep adding tags) */ public DynamicMetricsRegistry tag(String name, String description, String value, - boolean override) { + boolean override) { return tag(Interns.info(name, description), value, override); } /** * Add a tag to the metrics - * @param info metadata of the tag - * @param value of the tag + * + * @param info metadata of the tag + * @param value of the tag * @param override existing tag if true * @return the registry (for keep adding tags etc.) 
*/ @@ -326,7 +344,7 @@ public class DynamicMetricsRegistry { if (!override) { MetricsTag existing = tagsMap.putIfAbsent(info.name(), tag); if (existing != null) { - throw new MetricsException("Tag "+ info.name() +" already exists!"); + throw new MetricsException("Tag " + info.name() + " already exists!"); } return this; } @@ -350,8 +368,9 @@ public class DynamicMetricsRegistry { /** * Sample all the mutable metrics and put the snapshot in the builder + * * @param builder to contain the metrics snapshot - * @param all get all the metrics even if the values are not changed. + * @param all get all the metrics even if the values are not changed. */ public void snapshot(MetricsRecordBuilder builder, boolean all) { for (MetricsTag tag : tags()) { @@ -362,14 +381,16 @@ public class DynamicMetricsRegistry { } } - @Override public String toString() { + @Override + public String toString() { return Objects.toStringHelper(this) - .add("info", metricsInfo).add("tags", tags()).add("metrics", metrics()) - .toString(); + .add("info", metricsInfo).add("tags", tags()).add("metrics", metrics()) + .toString(); } /** * Removes metric by name + * * @param name name of the metric to remove */ public void removeMetric(String name) { @@ -392,7 +413,7 @@ public class DynamicMetricsRegistry { //Create the potential new gauge. MutableGaugeLong newGauge = new MutableGaugeLong(Interns.info(gaugeName, ""), - potentialStartingValue); + potentialStartingValue); // Try and put the gauge in. This is atomic. 
metric = metricsMap.putIfAbsent(gaugeName, newGauge); @@ -406,7 +427,7 @@ public class DynamicMetricsRegistry { if (!(metric instanceof MutableGaugeLong)) { throw new MetricsException("Metric already exists in registry for metric name: " + gaugeName + - " and not of type MetricMutableGaugeLong"); + " and not of type MetricMutableGaugeLong"); } return (MutableGaugeLong) metric; @@ -424,23 +445,22 @@ public class DynamicMetricsRegistry { MutableMetric counter = metricsMap.get(counterName); if (counter == null) { MutableCounterLong newCounter = - new MutableCounterLong(Interns.info(counterName, ""), potentialStartingValue); + new MutableCounterLong(Interns.info(counterName, ""), potentialStartingValue); counter = metricsMap.putIfAbsent(counterName, newCounter); if (counter == null) { return newCounter; } } - if (!(counter instanceof MutableCounterLong)) { throw new MetricsException("Metric already exists in registry for metric name: " + - counterName + " and not of type MetricMutableCounterLong"); + counterName + " and not of type MetricMutableCounterLong"); } return (MutableCounterLong) counter; } - private T + private T addNewMetricIfAbsent(String name, T ret, Class metricClass) { @@ -455,12 +475,12 @@ public class DynamicMetricsRegistry { return returnExistingWithCast(metric, metricClass, name); } - private T returnExistingWithCast(MutableMetric metric, - Class metricClass, String name) { + private T returnExistingWithCast(MutableMetric metric, + Class metricClass, String name) { if (!metricClass.isAssignableFrom(metric.getClass())) { throw new MetricsException("Metric already exists in registry for metric name: " + - name + " and not of type " + metricClass + - " but instead of type " + metric.getClass()); + name + " and not of type " + metricClass + + " but instead of type " + metric.getClass()); } return (T) metric; diff --git hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.HadoopCompatPlugin 
hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.HadoopCompatPlugin new file mode 100644 index 0000000..490cabf --- /dev/null +++ hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.HadoopCompatPlugin @@ -0,0 +1 @@ +org.apache.hadoop.hbase.HadoopTwoCompatPlugin \ No newline at end of file diff --git hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.master.metrics.MasterMetricsSource hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.master.metrics.MasterMetricsSource deleted file mode 100644 index fcd2f56..0000000 --- hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.master.metrics.MasterMetricsSource +++ /dev/null @@ -1 +0,0 @@ -org.apache.hadoop.hbase.master.metrics.MasterMetricsSourceImpl diff --git hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.metrics.MBeanSource hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.metrics.MBeanSource deleted file mode 100644 index 48aeafc..0000000 --- hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.metrics.MBeanSource +++ /dev/null @@ -1 +0,0 @@ -org.apache.hadoop.hbase.metrics.MBeanSourceImpl diff --git hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.replication.regionserver.metrics.ReplicationMetricsSource hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.replication.regionserver.metrics.ReplicationMetricsSource deleted file mode 100644 index bb64ad5..0000000 --- hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.replication.regionserver.metrics.ReplicationMetricsSource +++ /dev/null @@ -1 +0,0 @@ -org.apache.hadoop.hbase.replication.regionserver.metrics.ReplicationMetricsSourceImpl \ No newline at end of file diff --git 
hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/master/metrics/MasterMetricsSourceImplTest.java hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/master/metrics/MasterMetricsSourceImplTest.java index 000cf69..10641c8 100644 --- hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/master/metrics/MasterMetricsSourceImplTest.java +++ hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/master/metrics/MasterMetricsSourceImplTest.java @@ -18,23 +18,23 @@ package org.apache.hadoop.hbase.master.metrics; -import org.apache.hadoop.hbase.CompatibilitySingletonFactory; +import com.google.inject.Guice; +import com.google.inject.Injector; +import org.apache.hadoop.hbase.HadoopCompatPluginFactory; import org.junit.Test; import static org.junit.Assert.assertSame; import static org.junit.Assert.assertTrue; -/** - * Test for MasterMetricsSourceImpl - */ +/** Test for MasterMetricsSourceImpl */ public class MasterMetricsSourceImplTest { @Test public void testGetInstance() throws Exception { - MasterMetricsSource rms = CompatibilitySingletonFactory - .getInstance(MasterMetricsSource.class); + Injector inj = Guice.createInjector(HadoopCompatPluginFactory.getModules().values()); + MasterMetricsSource rms = inj.getInstance(MasterMetricsSource.class); assertTrue(rms instanceof MasterMetricsSourceImpl); - assertSame(rms, CompatibilitySingletonFactory.getInstance(MasterMetricsSource.class)); + assertSame(rms, inj.getInstance(MasterMetricsSource.class)); } } diff --git hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/metrics/BaseMetricsSourceImplTest.java hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/metrics/BaseMetricsSourceImplTest.java index 35dc62d..7c36a99 100644 --- hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/metrics/BaseMetricsSourceImplTest.java +++ hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/metrics/BaseMetricsSourceImplTest.java @@ -26,9 +26,7 @@ import org.junit.Test; import static 
org.junit.Assert.assertEquals; import static org.junit.Assert.assertNull; -/** - * Test of default BaseMetricsSource for hadoop 2 - */ +/** Test of default BaseMetricsSource for hadoop 2 */ public class BaseMetricsSourceImplTest { private static BaseMetricsSourceImpl bmsi; diff --git hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/metrics/ReplicationMetricsSourceImplTest.java hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/metrics/ReplicationMetricsSourceImplTest.java index 93f1e4a..a5f0a91 100644 --- hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/metrics/ReplicationMetricsSourceImplTest.java +++ hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/metrics/ReplicationMetricsSourceImplTest.java @@ -18,7 +18,8 @@ package org.apache.hadoop.hbase.replication.regionserver.metrics; -import org.apache.hadoop.hbase.CompatibilitySingletonFactory; +import com.google.inject.Guice; +import org.apache.hadoop.hbase.HadoopCompatPluginFactory; import org.junit.Test; import static org.junit.Assert.assertTrue; @@ -26,10 +27,16 @@ import static org.junit.Assert.assertTrue; /** Test for ReplicationMetricsSourceImpl */ public class ReplicationMetricsSourceImplTest { + /** + * Make sure that the guice is wired up. 
+ * + * @throws Exception if the Guice injector cannot be created + */ @Test public void testGetInstance() throws Exception { - ReplicationMetricsSource rms = CompatibilitySingletonFactory - .getInstance(ReplicationMetricsSource.class); + ReplicationMetricsSource rms = + Guice.createInjector(HadoopCompatPluginFactory.getModules().values()) + .getInstance(ReplicationMetricsSource.class); assertTrue(rms instanceof ReplicationMetricsSourceImpl); } } diff --git hbase-server/pom.xml hbase-server/pom.xml index 54aa41b..3507b65 100644 --- hbase-server/pom.xml +++ hbase-server/pom.xml @@ -299,6 +299,14 @@ + com.google.inject + guice + + + com.google.inject.extensions + guice-assistedinject + + io.netty netty diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/HBaseGuice.java hbase-server/src/main/java/org/apache/hadoop/hbase/HBaseGuice.java new file mode 100644 index 0000000..18da28d --- /dev/null +++ hbase-server/src/main/java/org/apache/hadoop/hbase/HBaseGuice.java @@ -0,0 +1,139 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase; + +import com.google.inject.Guice; +import com.google.inject.Injector; +import com.google.inject.Module; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.hbase.catalog.CatalogTrackerManager; +import org.apache.hadoop.hbase.executor.ExecutorServiceModule; +import org.apache.hadoop.hbase.ipc.RpcServerModule; +import org.apache.hadoop.hbase.master.ActiveMasterManagerModule; +import org.apache.hadoop.hbase.master.AssignmentManagerModule; +import org.apache.hadoop.hbase.master.CatalogJanitorModule; +import org.apache.hadoop.hbase.master.HMasterModule; +import org.apache.hadoop.hbase.master.LocalHMasterModule; +import org.apache.hadoop.hbase.master.MasterCoprocessorHostModule; +import org.apache.hadoop.hbase.master.MasterFileSystemModule; +import org.apache.hadoop.hbase.master.ServerManagerModule; +import org.apache.hadoop.hbase.master.balancer.LoadBalancerModule; +import org.apache.hadoop.hbase.master.metrics.MasterMetricsModule; +import org.apache.hadoop.hbase.regionserver.CompactSplitThreadModule; +import org.apache.hadoop.hbase.regionserver.CompactionCheckerModule; +import org.apache.hadoop.hbase.regionserver.HRegionServerModule; +import org.apache.hadoop.hbase.regionserver.HRegionThriftServerModule; +import org.apache.hadoop.hbase.regionserver.LeasesModule; +import org.apache.hadoop.hbase.regionserver.MemStoreFlusherModule; +import org.apache.hadoop.hbase.replication.ReplicationZookeeperModule; +import org.apache.hadoop.hbase.replication.regionserver.ReplicationModule; +import org.apache.hadoop.hbase.replication.regionserver.ReplicationSinkModule; +import org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManagerModule; +import org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceModule; +import org.apache.hadoop.hbase.replication.regionserver.metrics.ReplicationSourceMetricsModule; +import org.apache.hadoop.hbase.util.FSTableDescriptorsModule; +import 
org.apache.hadoop.hbase.util.SleeperModule; +import org.apache.hadoop.hbase.zookeeper.ClusterStatusTrackerModule; +import org.apache.hadoop.hbase.zookeeper.DrainingServerTrackerModule; +import org.apache.hadoop.hbase.zookeeper.MasterAddressTrackerModule; +import org.apache.hadoop.hbase.zookeeper.RegionServerTrackerModule; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcherModule; + +import java.util.Collection; +import java.util.HashMap; +import java.util.Map; + +/** + * A Static class to help with creating Guice Injectors. + * This class adds in the normal Modules for standing up an hbase server/cluster. + */ +@InterfaceAudience.Private +public class HBaseGuice { + + public static Injector createInjector() { + Collection modules = makeDefaultModules().values(); + return Guice.createInjector(modules); + } + + public static Injector createLocalHBaseInjector() { + return Guice.createInjector(makeLocalHBaseModules().values()); + } + + public static Map makeDefaultModules() { + Map modules = new HashMap(); + + //Shared modules + modules.put("RpcServer", new RpcServerModule()); + modules.put("Sleeper", new SleeperModule()); + modules.put("FSTableDescriptors", new FSTableDescriptorsModule()); + modules.put("ExecutorService", new ExecutorServiceModule()); + + //ZK modules + modules.put("ZooKeeperWatcher", new ZooKeeperWatcherModule()); + modules.put("MasterAddressTracker", new MasterAddressTrackerModule()); + modules.put("ClusterStatusTracker", new ClusterStatusTrackerModule()); + modules.put("CatalogTracker", new CatalogTrackerManager()); + modules.put("DrainingServerTracker", new DrainingServerTrackerModule()); + + //Master Modules + modules.put("HMaster", new HMasterModule()); + modules.put("ActiveMasterManager", new ActiveMasterManagerModule()); + modules.put("AssignmentManager", new AssignmentManagerModule()); + modules.put("RegionServerTracker", new RegionServerTrackerModule()); + modules.put("LoadBalancer", new LoadBalancerModule()); + 
modules.put("MasterFilesystem", new MasterFileSystemModule()); + modules.put("MasterMetrics", new MasterMetricsModule()); + modules.put("ServerManager", new ServerManagerModule()); + modules.put("MasterCoprocessorHost", new MasterCoprocessorHostModule()); + modules.put("CatalogJanitor", new CatalogJanitorModule()); + + //RS modules + modules.put("HRegionServer", new HRegionServerModule()); + modules.put("MemStoreFlusher", new MemStoreFlusherModule()); + modules.put("CompactSplitThread", new CompactSplitThreadModule()); + modules.put("CompactionChecker", new CompactionCheckerModule()); + modules.put("Leases", new LeasesModule()); + modules.put("HRegionThriftServer", new HRegionThriftServerModule()); + + //Replication Modules + modules.put("Replication", new ReplicationModule()); + modules.put("ReplicationZookeeper", new ReplicationZookeeperModule()); + modules.put("ReplicationSourceManager", new ReplicationSourceManagerModule()); + modules.put("ReplicationSource", new ReplicationSourceModule()); + modules.put("ReplicationSourceMetrics", new ReplicationSourceMetricsModule()); + modules.put("ReplicationSink", new ReplicationSinkModule()); + + //Local HBase Cluster Modules + modules.put("LocalHBaseCluster", new LocalHBaseClusterModule()); + + //Hadoop Compat modules. 
+ modules.putAll(HadoopCompatPluginFactory.getModules()); + return modules; + } + + public static Map makeLocalHBaseModules() { + Map modules = makeDefaultModules(); + + //Local HBase Cluster Modules + modules.put("LocalHBaseCluster", new LocalHBaseClusterModule()); + modules.put("HMaster", new LocalHMasterModule()); + return modules; + } + +} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java index f550c7a..86929c9 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java @@ -25,15 +25,16 @@ import java.util.ArrayList; import java.util.Collections; import java.util.List; +import com.google.inject.assistedinject.Assisted; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.client.HBaseAdmin; +import org.apache.hadoop.hbase.master.HMasterFactory; import org.apache.hadoop.hbase.regionserver.HRegionServer; +import org.apache.hadoop.hbase.regionserver.HRegionServerFactory; import org.apache.hadoop.hbase.security.User; -import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread; import org.apache.hadoop.hbase.util.Threads; @@ -41,6 +42,9 @@ import java.util.concurrent.CopyOnWriteArrayList; import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.util.JVMClusterUtil; +import javax.inject.Inject; +import javax.inject.Provider; + /** * This class creates a single process HBase cluster. One thread is created for * a master and one per region server. 
@@ -71,58 +75,8 @@ public class LocalHBaseCluster { /** 'local:' */ public static final String LOCAL_COLON = LOCAL + ":"; private final Configuration conf; - private final Class masterClass; - private final Class regionServerClass; - - /** - * Constructor. - * @param conf - * @throws IOException - */ - public LocalHBaseCluster(final Configuration conf) - throws IOException { - this(conf, DEFAULT_NO); - } - - /** - * Constructor. - * @param conf Configuration to use. Post construction has the master's - * address. - * @param noRegionServers Count of regionservers to start. - * @throws IOException - */ - public LocalHBaseCluster(final Configuration conf, final int noRegionServers) - throws IOException { - this(conf, 1, noRegionServers, getMasterImplementation(conf), - getRegionServerImplementation(conf)); - } - - /** - * Constructor. - * @param conf Configuration to use. Post construction has the active master - * address. - * @param noMasters Count of masters to start. - * @param noRegionServers Count of regionservers to start. - * @throws IOException - */ - public LocalHBaseCluster(final Configuration conf, final int noMasters, - final int noRegionServers) - throws IOException { - this(conf, noMasters, noRegionServers, getMasterImplementation(conf), - getRegionServerImplementation(conf)); - } - - @SuppressWarnings("unchecked") - private static Class getRegionServerImplementation(final Configuration conf) { - return (Class)conf.getClass(HConstants.REGION_SERVER_IMPL, - HRegionServer.class); - } - - @SuppressWarnings("unchecked") - private static Class getMasterImplementation(final Configuration conf) { - return (Class)conf.getClass(HConstants.MASTER_IMPL, - HMaster.class); - } + private final HMasterFactory masterFactory; + private final HRegionServerFactory regionServerFactory; /** * Constructor. @@ -130,87 +84,84 @@ public class LocalHBaseCluster { * address. * @param noMasters Count of masters to start. * @param noRegionServers Count of regionservers to start. 
- * @param masterClass - * @param regionServerClass * @throws IOException */ @SuppressWarnings("unchecked") - public LocalHBaseCluster(final Configuration conf, final int noMasters, - final int noRegionServers, final Class masterClass, - final Class regionServerClass) + @Inject + public LocalHBaseCluster(@Assisted final Configuration conf, + @Assisted("noMasters") final int noMasters, + @Assisted("noRegionServers") final int noRegionServers, + HMasterFactory masterFactory, + HRegionServerFactory regionServerFactory) throws IOException { this.conf = conf; + this.masterFactory = masterFactory; + this.regionServerFactory = regionServerFactory; + // Always have masters and regionservers come up on port '0' so we don't // clash over default ports. conf.set(HConstants.MASTER_PORT, "0"); conf.set(HConstants.REGIONSERVER_PORT, "0"); - this.masterClass = (Class) - conf.getClass(HConstants.MASTER_IMPL, masterClass); + // Start the HMasters. for (int i = 0; i < noMasters; i++) { - addMaster(new Configuration(conf), i); + addMaster(masterFactory, i); } // Start the HRegionServers. - this.regionServerClass = - (Class)conf.getClass(HConstants.REGION_SERVER_IMPL, - regionServerClass); for (int i = 0; i < noRegionServers; i++) { - addRegionServer(new Configuration(conf), i); + addRegionServer(regionServerFactory, i); } } public JVMClusterUtil.RegionServerThread addRegionServer() throws IOException { - return addRegionServer(new Configuration(conf), this.regionThreads.size()); + return addRegionServer(this.regionServerFactory, this.regionThreads.size()); } public JVMClusterUtil.RegionServerThread addRegionServer( - Configuration config, final int index) + HRegionServerFactory hrsf, final int index) throws IOException { // Create each regionserver with its own Configuration instance so each has // its HConnection instance rather than share (see HBASE_INSTANCES down in // the guts of HConnectionManager. 
JVMClusterUtil.RegionServerThread rst = - JVMClusterUtil.createRegionServerThread(config, - this.regionServerClass, index); + JVMClusterUtil.createRegionServerThread(hrsf, new Configuration(conf), index); this.regionThreads.add(rst); return rst; } - public JVMClusterUtil.RegionServerThread addRegionServer( - final Configuration config, final int index, User user) + public JVMClusterUtil.RegionServerThread addRegionServer(final int index, User user) throws IOException, InterruptedException { return user.runAs( new PrivilegedExceptionAction() { public JVMClusterUtil.RegionServerThread run() throws Exception { - return addRegionServer(config, index); + return addRegionServer(regionServerFactory, index); } }); } public JVMClusterUtil.MasterThread addMaster() throws IOException { - return addMaster(new Configuration(conf), this.masterThreads.size()); + return addMaster(this.masterFactory, this.masterThreads.size()); } - public JVMClusterUtil.MasterThread addMaster(Configuration c, final int index) + public JVMClusterUtil.MasterThread addMaster(HMasterFactory mf, final int index) throws IOException { // Create each master with its own Configuration instance so each has // its HConnection instance rather than share (see HBASE_INSTANCES down in // the guts of HConnectionManager. 
- JVMClusterUtil.MasterThread mt = JVMClusterUtil.createMasterThread(c, - (Class) conf.getClass(HConstants.MASTER_IMPL, this.masterClass), index); + JVMClusterUtil.MasterThread mt = JVMClusterUtil.createMasterThread(mf, new Configuration(conf), index); this.masterThreads.add(mt); return mt; } public JVMClusterUtil.MasterThread addMaster( - final Configuration c, final int index, User user) + final int index, User user) throws IOException, InterruptedException { return user.runAs( new PrivilegedExceptionAction() { public JVMClusterUtil.MasterThread run() throws Exception { - return addMaster(c, index); + return addMaster(masterFactory, index); } }); } @@ -439,20 +390,4 @@ public class LocalHBaseCluster { boolean mode = c.getBoolean(HConstants.CLUSTER_DISTRIBUTED, HConstants.DEFAULT_CLUSTER_DISTRIBUTED); return(mode == HConstants.CLUSTER_IS_LOCAL); } - - /** - * Test things basically work. - * @param args - * @throws IOException - */ - public static void main(String[] args) throws IOException { - Configuration conf = HBaseConfiguration.create(); - LocalHBaseCluster cluster = new LocalHBaseCluster(conf); - cluster.startup(); - HBaseAdmin admin = new HBaseAdmin(conf); - HTableDescriptor htd = - new HTableDescriptor(Bytes.toBytes(cluster.getClass().getName())); - admin.createTable(htd); - cluster.shutdown(); - } } diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseClusterFactory.java hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseClusterFactory.java new file mode 100644 index 0000000..1e9b4d5 --- /dev/null +++ hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseClusterFactory.java @@ -0,0 +1,33 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase; + +import com.google.inject.assistedinject.Assisted; +import org.apache.hadoop.conf.Configuration; + +/** + * + */ +public interface LocalHBaseClusterFactory { + + public LocalHBaseCluster create(Configuration conf, + @Assisted("noMasters") int noMasters, + @Assisted("noRegionServers") int noRegionservers); + +} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseClusterModule.java hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseClusterModule.java new file mode 100644 index 0000000..2d00c15 --- /dev/null +++ hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseClusterModule.java @@ -0,0 +1,37 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase; + +import com.google.inject.AbstractModule; +import com.google.inject.assistedinject.FactoryModuleBuilder; +import org.apache.hadoop.hbase.master.HMaster; +import org.apache.hadoop.hbase.master.LocalHMaster; + +/** + * + */ +public class LocalHBaseClusterModule extends AbstractModule { + + @Override + protected void configure() { + install(new FactoryModuleBuilder() + .implement(LocalHBaseCluster.class, LocalHBaseCluster.class) + .build(LocalHBaseClusterFactory.class)); + } +} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/CatalogTracker.java hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/CatalogTracker.java index 8a383e4..e19d779 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/CatalogTracker.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/CatalogTracker.java @@ -26,6 +26,7 @@ import java.net.SocketTimeoutException; import java.net.UnknownHostException; import java.util.concurrent.atomic.AtomicBoolean; +import com.google.inject.assistedinject.Assisted; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; @@ -47,6 +48,8 @@ import org.apache.hadoop.hbase.zookeeper.RootRegionTracker; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; import org.apache.hadoop.ipc.RemoteException; +import javax.inject.Inject; + /** * Tracks the availability of the catalog tables -ROOT- and * .META.. @@ -181,9 +184,12 @@ public class CatalogTracker { * ({@link Object#wait(long)} when passed a 0 waits for ever). 
* @throws IOException */ - public CatalogTracker(final ZooKeeperWatcher zk, final Configuration conf, - Abortable abortable, final int defaultTimeout) - throws IOException { + @Inject + public CatalogTracker(@Assisted final ZooKeeperWatcher zk, + @Assisted final Configuration conf, + @Assisted Abortable abortable, + @Assisted final int defaultTimeout) + throws IOException { this(zk, conf, HConnectionManager.getConnection(conf), abortable, defaultTimeout); } diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/CatalogTrackerFactory.java hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/CatalogTrackerFactory.java new file mode 100644 index 0000000..0403139 --- /dev/null +++ hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/CatalogTrackerFactory.java @@ -0,0 +1,49 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.catalog; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.Abortable; +import org.apache.hadoop.hbase.catalog.CatalogTracker; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; + +import java.io.IOException; + +/** + * Guice assisted-injection factory for {@link CatalogTracker} instances. + */ +public interface CatalogTrackerFactory { + + /** + * Create a CatalogTracker + * + * @param zk The ZooKeeperWatcher to use + * @param conf The Configuration for the server. This is passed in explicitly so that + * HConnectionManager will pool connections correctly. + * @param abortable Abortable to abort if there was an error. + * @param defaultTimeout Timeout to wait. + * @return a new CatalogTracker + * @throws IOException if the tracker cannot be created + */ + public CatalogTracker create(final ZooKeeperWatcher zk, + final Configuration conf, + Abortable abortable, + final int defaultTimeout) throws IOException; + +} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/CatalogTrackerManager.java hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/CatalogTrackerManager.java new file mode 100644 index 0000000..fd7b5ff --- /dev/null +++ hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/CatalogTrackerManager.java @@ -0,0 +1,35 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.catalog; + +import com.google.inject.AbstractModule; +import com.google.inject.assistedinject.FactoryModuleBuilder; + +/** + * + */ +public class CatalogTrackerManager extends AbstractModule { + + @Override + protected void configure() { + install(new FactoryModuleBuilder() + .implement(CatalogTracker.class, CatalogTracker.class) + .build(CatalogTrackerFactory.class)); + } +} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorService.java hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorService.java index d8f019a..e73a2de 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorService.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorService.java @@ -33,6 +33,7 @@ import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicLong; +import com.google.inject.assistedinject.Assisted; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; @@ -43,6 +44,8 @@ import com.google.common.collect.Lists; import com.google.common.collect.Maps; import com.google.common.util.concurrent.ThreadFactoryBuilder; +import javax.inject.Inject; + /** * This is a generic executor service. This component abstracts a * threadpool, a queue to which {@link EventHandler.EventType}s can be submitted, @@ -110,7 +113,8 @@ public class ExecutorService { * Default constructor. * @param servername Name of the hosting server. 
*/ - public ExecutorService(final String servername) { + @Inject + public ExecutorService(@Assisted final String servername) { super(); this.servername = servername; } diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorServiceFactory.java hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorServiceFactory.java new file mode 100644 index 0000000..6dcf23e --- /dev/null +++ hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorServiceFactory.java @@ -0,0 +1,10 @@ +package org.apache.hadoop.hbase.executor; + +/** + * + */ +public interface ExecutorServiceFactory { + + public ExecutorService create(final String servername); + +} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorServiceModule.java hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorServiceModule.java new file mode 100644 index 0000000..e8552f7 --- /dev/null +++ hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorServiceModule.java @@ -0,0 +1,17 @@ +package org.apache.hadoop.hbase.executor; + +import com.google.inject.AbstractModule; +import com.google.inject.assistedinject.FactoryModuleBuilder; + +/** + * + */ +public class ExecutorServiceModule extends AbstractModule { + + @Override + protected void configure() { + install(new FactoryModuleBuilder() + .implement(ExecutorService.class, ExecutorService.class) + .build(ExecutorServiceFactory.class)); + } +} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServerDefaultFactory.java hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServerDefaultFactory.java new file mode 100644 index 0000000..347a065 --- /dev/null +++ hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServerDefaultFactory.java @@ -0,0 +1,53 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.ipc; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.ipc.HBaseRPC; +import org.apache.hadoop.hbase.ipc.RpcServer; +import org.apache.hadoop.hbase.ipc.RpcServerFactory; + +import java.io.IOException; + +/** + * + */ +public class RpcServerDefaultFactory implements RpcServerFactory { + + @Override + public RpcServer create(Object instance, + Class[] ifaces, + String bindAddress, + int port, + int numHandlers, + int metaHandlerCount, + boolean verbose, + Configuration conf, + int highPriorityLevel) throws IOException { + return HBaseRPC.getServer(instance, + ifaces, + bindAddress, + port, + numHandlers, + metaHandlerCount, + verbose, + conf, + highPriorityLevel); + } +} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServerFactory.java hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServerFactory.java new file mode 100644 index 0000000..2035840 --- /dev/null +++ hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServerFactory.java @@ -0,0 +1,41 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.ipc; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.ipc.RpcServer; + +import java.io.IOException; + +/** + * + */ +public interface RpcServerFactory { + + public RpcServer create(final Object instance, + final Class[] ifaces, + final String bindAddress, + final int port, + final int numHandlers, + int metaHandlerCount, + final boolean verbose, + Configuration conf, + int highPriorityLevel) throws IOException; + +} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServerModule.java hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServerModule.java new file mode 100644 index 0000000..4c0b8ba --- /dev/null +++ hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServerModule.java @@ -0,0 +1,32 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.ipc; + +import com.google.inject.AbstractModule; + +/** + * + */ +public class RpcServerModule extends AbstractModule { + + @Override + protected void configure() { + bind(RpcServerFactory.class).to(RpcServerDefaultFactory.class); + } +} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/ActiveMasterManager.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/ActiveMasterManager.java index 84935d9..4af1a44 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/master/ActiveMasterManager.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/ActiveMasterManager.java @@ -22,6 +22,7 @@ package org.apache.hadoop.hbase.master; import java.io.IOException; import java.util.concurrent.atomic.AtomicBoolean; +import com.google.inject.assistedinject.Assisted; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; @@ -37,6 +38,8 @@ import org.apache.hadoop.hbase.zookeeper.ZooKeeperListener; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; import org.apache.zookeeper.KeeperException; +import javax.inject.Inject; + /** * Handles everything on master-side related to master election. * @@ -51,7 +54,7 @@ import org.apache.zookeeper.KeeperException; * the active master of the cluster. 
*/ @InterfaceAudience.Private -class ActiveMasterManager extends ZooKeeperListener { +public class ActiveMasterManager extends ZooKeeperListener { private static final Log LOG = LogFactory.getLog(ActiveMasterManager.class); final AtomicBoolean clusterHasActiveMaster = new AtomicBoolean(false); @@ -60,11 +63,14 @@ class ActiveMasterManager extends ZooKeeperListener { private final Server master; /** - * @param watcher - * @param sn ServerName - * @param master In an instance of a Master. + * @param watcher The ZookeeperWatcher to use. + * @param sn ServerName + * @param master In an instance of a Master. */ - ActiveMasterManager(ZooKeeperWatcher watcher, ServerName sn, Server master) { + @Inject + ActiveMasterManager(@Assisted ZooKeeperWatcher watcher, + @Assisted ServerName sn, + @Assisted Server master) { super(watcher); this.sn = sn; this.master = master; diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/ActiveMasterManagerFactory.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/ActiveMasterManagerFactory.java new file mode 100644 index 0000000..cf204c5 --- /dev/null +++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/ActiveMasterManagerFactory.java @@ -0,0 +1,37 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.master; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.hbase.Server; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; + +/** + * Interface of a Factory that can construct an ActiveMasterManager. + * This will be used by guice in DI. + */ +@InterfaceAudience.Private +public interface ActiveMasterManagerFactory { + + public ActiveMasterManager create(ZooKeeperWatcher watcher, + ServerName sn, + Server master); + +} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/ActiveMasterManagerModule.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/ActiveMasterManagerModule.java new file mode 100644 index 0000000..0f0c3b5 --- /dev/null +++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/ActiveMasterManagerModule.java @@ -0,0 +1,36 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ */ + +package org.apache.hadoop.hbase.master; + +import com.google.inject.AbstractModule; +import com.google.inject.assistedinject.FactoryModuleBuilder; + +/** + * + */ +public class ActiveMasterManagerModule extends AbstractModule { + + @Override + protected void configure() { + install(new FactoryModuleBuilder() + .implement(ActiveMasterManager.class, ActiveMasterManager.class) + .build(ActiveMasterManagerFactory.class)); + + } +} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java index d6b41ec..f00f7de 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java @@ -38,6 +38,7 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; +import com.google.inject.assistedinject.Assisted; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; @@ -87,6 +88,8 @@ import org.apache.zookeeper.KeeperException.NoNodeException; import org.apache.zookeeper.KeeperException.NodeExistsException; import org.apache.zookeeper.data.Stat; +import javax.inject.Inject; + /** * Manages and performs region assignment. *

@@ -186,9 +189,14 @@ public class AssignmentManager extends ZooKeeperListener { * @throws KeeperException * @throws IOException */ - public AssignmentManager(Server master, ServerManager serverManager, - CatalogTracker catalogTracker, final LoadBalancer balancer, - final ExecutorService service, MasterMetrics metrics) throws KeeperException, IOException { + @Inject + public AssignmentManager(@Assisted Server master, + @Assisted ServerManager serverManager, + @Assisted CatalogTracker catalogTracker, + @Assisted final LoadBalancer balancer, + @Assisted final ExecutorService service, + @Assisted MasterMetrics metrics) + throws KeeperException, IOException { super(master.getZooKeeper()); this.master = master; this.serverManager = serverManager; diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManagerFactory.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManagerFactory.java new file mode 100644 index 0000000..90326a1 --- /dev/null +++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManagerFactory.java @@ -0,0 +1,41 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.master; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.hbase.Server; +import org.apache.hadoop.hbase.catalog.CatalogTracker; +import org.apache.hadoop.hbase.executor.ExecutorService; +import org.apache.hadoop.hbase.master.metrics.MasterMetrics; + +/** + * Interface of a Factory that can construct an AssignmentManager. + * This will be used by guice in DI. + */ +@InterfaceAudience.Private +public interface AssignmentManagerFactory { + + public AssignmentManager create(Server master, + ServerManager serverManager, + CatalogTracker catalogTracker, + LoadBalancer balancer, + ExecutorService service, + MasterMetrics metrics); + +} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManagerModule.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManagerModule.java new file mode 100644 index 0000000..f8a47ad --- /dev/null +++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManagerModule.java @@ -0,0 +1,35 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package org.apache.hadoop.hbase.master; + +import com.google.inject.AbstractModule; +import com.google.inject.assistedinject.FactoryModuleBuilder; + +/** + * + */ +public class AssignmentManagerModule extends AbstractModule { + + @Override + protected void configure() { + install(new FactoryModuleBuilder() + .implement(AssignmentManager.class, AssignmentManager.class) + .build(AssignmentManagerFactory.class)); + } +} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java index 0249d79..4bec2ba 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java @@ -28,6 +28,7 @@ import java.util.TreeMap; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; +import com.google.inject.assistedinject.Assisted; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; @@ -52,22 +53,25 @@ import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.util.Writables; +import javax.inject.Inject; + /** * A janitor for the catalog tables. Scans the .META. catalog * table on a period looking for unused regions to garbage collect. 
*/ @InterfaceAudience.Private -class CatalogJanitor extends Chore { +public class CatalogJanitor extends Chore { private static final Log LOG = LogFactory.getLog(CatalogJanitor.class.getName()); private final Server server; private final MasterServices services; private AtomicBoolean enabled = new AtomicBoolean(true); private AtomicBoolean alreadyRunning = new AtomicBoolean(false); - CatalogJanitor(final Server server, final MasterServices services) { + @Inject + CatalogJanitor(@Assisted final Server server, @Assisted final MasterServices services) { super(server.getServerName() + "-CatalogJanitor", - server.getConfiguration().getInt("hbase.catalogjanitor.interval", 300000), - server); + server.getConfiguration().getInt("hbase.catalogjanitor.interval", 300000), + server); this.server = server; this.services = services; } diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitorFactory.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitorFactory.java new file mode 100644 index 0000000..1b838db --- /dev/null +++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitorFactory.java @@ -0,0 +1,33 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.master; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.hbase.Server; + +/** + * Interface of a Factory that can construct a CatalogJanitor. + * This will be used by guice in DI. + */ +@InterfaceAudience.Private +public interface CatalogJanitorFactory { + + public CatalogJanitor create( final Server server, final MasterServices services); + +} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitorModule.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitorModule.java new file mode 100644 index 0000000..ee74f25 --- /dev/null +++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitorModule.java @@ -0,0 +1,35 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.master; + +import com.google.inject.AbstractModule; +import com.google.inject.assistedinject.FactoryModuleBuilder; + +/** + * + */ +public class CatalogJanitorModule extends AbstractModule { + + @Override + protected void configure() { + install(new FactoryModuleBuilder() + .implement(CatalogJanitor.class, CatalogJanitor.class) + .build(CatalogJanitorFactory.class)); + } +} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index 31df068..6292cac 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -20,8 +20,6 @@ package org.apache.hadoop.hbase.master; import java.io.IOException; -import java.lang.reflect.Constructor; -import java.lang.reflect.InvocationTargetException; import java.net.InetAddress; import java.net.InetSocketAddress; import java.net.UnknownHostException; @@ -39,17 +37,17 @@ import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReference; +import javax.inject.Inject; import javax.management.ObjectName; +import com.google.inject.assistedinject.Assisted; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.Chore; import org.apache.hadoop.hbase.ClusterStatus; -import org.apache.hadoop.hbase.CompatibilitySingletonFactory; import org.apache.hadoop.hbase.DeserializationException; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; @@ -79,15 +77,18 @@ import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; import 
org.apache.hadoop.hbase.executor.ExecutorService; import org.apache.hadoop.hbase.executor.ExecutorService.ExecutorType; -import org.apache.hadoop.hbase.ipc.HBaseRPC; +import org.apache.hadoop.hbase.executor.ExecutorServiceFactory; import org.apache.hadoop.hbase.ipc.HBaseServer; +import org.apache.hadoop.hbase.master.balancer.LoadBalancerFactory; +import org.apache.hadoop.hbase.master.balancer.LoadBalancerFactoryImpl; import org.apache.hadoop.hbase.master.metrics.MXBeanImpl; import org.apache.hadoop.hbase.metrics.MBeanSource; +import org.apache.hadoop.hbase.ipc.RpcServerFactory; +import org.apache.hadoop.hbase.master.metrics.MasterMetricsFactory; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.ResponseConverter; import org.apache.hadoop.hbase.ipc.ProtocolSignature; import org.apache.hadoop.hbase.ipc.RpcServer; -import org.apache.hadoop.hbase.master.balancer.LoadBalancerFactory; import org.apache.hadoop.hbase.master.cleaner.HFileCleaner; import org.apache.hadoop.hbase.master.cleaner.LogCleaner; import org.apache.hadoop.hbase.master.handler.CreateTableHandler; @@ -105,6 +106,36 @@ import org.apache.hadoop.hbase.monitoring.MemoryBoundedLogMessageBuffer; import org.apache.hadoop.hbase.monitoring.MonitoredTask; import org.apache.hadoop.hbase.monitoring.TaskMonitor; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; +import org.apache.hadoop.hbase.replication.regionserver.Replication; +import org.apache.hadoop.hbase.security.User; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.CompressionTest; +import org.apache.hadoop.hbase.util.FSTableDescriptors; +import org.apache.hadoop.hbase.util.FSTableDescriptorsFactory; +import org.apache.hadoop.hbase.util.HFileArchiveUtil; +import org.apache.hadoop.hbase.util.HasThread; +import org.apache.hadoop.hbase.util.InfoServer; +import org.apache.hadoop.hbase.util.Pair; +import org.apache.hadoop.hbase.util.Sleeper; +import 
org.apache.hadoop.hbase.util.SleeperFactory; +import org.apache.hadoop.hbase.util.Strings; +import org.apache.hadoop.hbase.util.Threads; +import org.apache.hadoop.hbase.util.VersionInfo; +import org.apache.hadoop.hbase.catalog.CatalogTrackerFactory; +import org.apache.hadoop.hbase.zookeeper.ClusterStatusTrackerFactory; +import org.apache.hadoop.hbase.zookeeper.DrainingServerTrackerFactory; +import org.apache.hadoop.hbase.zookeeper.RegionServerTrackerFactory; +import org.apache.hadoop.hbase.zookeeper.ZKClusterId; +import org.apache.hadoop.hbase.zookeeper.ClusterStatusTracker; +import org.apache.hadoop.hbase.zookeeper.DrainingServerTracker; +import org.apache.hadoop.hbase.zookeeper.RegionServerTracker; +import org.apache.hadoop.hbase.zookeeper.ZKUtil; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcherFactory; +import org.apache.hadoop.metrics.util.MBeanUtil; +import org.apache.hadoop.net.DNS; +import org.apache.zookeeper.KeeperException; +import org.apache.zookeeper.Watcher; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType; import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AddColumnRequest; @@ -159,29 +190,6 @@ import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.Regio import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse; import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest; import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse; -import org.apache.hadoop.hbase.replication.regionserver.Replication; -import org.apache.hadoop.hbase.security.User; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.CompressionTest; -import 
org.apache.hadoop.hbase.util.FSTableDescriptors; -import org.apache.hadoop.hbase.util.HFileArchiveUtil; -import org.apache.hadoop.hbase.util.HasThread; -import org.apache.hadoop.hbase.util.InfoServer; -import org.apache.hadoop.hbase.util.Pair; -import org.apache.hadoop.hbase.util.Sleeper; -import org.apache.hadoop.hbase.util.Strings; -import org.apache.hadoop.hbase.util.Threads; -import org.apache.hadoop.hbase.util.VersionInfo; -import org.apache.hadoop.hbase.zookeeper.ClusterStatusTracker; -import org.apache.hadoop.hbase.zookeeper.DrainingServerTracker; -import org.apache.hadoop.hbase.zookeeper.RegionServerTracker; -import org.apache.hadoop.hbase.zookeeper.ZKClusterId; -import org.apache.hadoop.hbase.zookeeper.ZKUtil; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; -import org.apache.hadoop.metrics.util.MBeanUtil; -import org.apache.hadoop.net.DNS; -import org.apache.zookeeper.KeeperException; -import org.apache.zookeeper.Watcher; import com.google.protobuf.RpcController; import com.google.protobuf.ServiceException; @@ -302,6 +310,20 @@ Server { //should we check the compression codec type at master side, default true, HBASE-6370 private final boolean masterCheckCompression; + private final ClusterStatusTrackerFactory clusterStatusTrackerFactory; + private final ActiveMasterManagerFactory activeMasterManagerFactory; + private final CatalogTrackerFactory catalogTrackerFactory; + private final AssignmentManagerFactory assignmentManagerFactory; + private final RegionServerTrackerFactory regionServerTrackerFactory; + private final DrainingServerTrackerFactory drainingServerTrackerFactory; + private final LoadBalancerFactory loadBalancerFactory; + private final MasterFileSystemFactory masterFileSystemFactory; + private final FSTableDescriptorsFactory fsTableDescriptorsFactory; + private final ExecutorServiceFactory executorServiceFactory; + private final ServerManagerFactory serverManagerFactory; + private final MasterCoprocessorHostFactory 
masterCoprocessorHostFactory; + private final CatalogJanitorFactory catalogJanitorFactory; + private final MBeanSource mBeanSource; /** * Initializes the HMaster. The steps are as follows: @@ -315,9 +337,45 @@ Server { * run in their own thread rather than within the context of the constructor. * @throws InterruptedException */ - public HMaster(final Configuration conf) - throws IOException, KeeperException, InterruptedException { + @Inject + public HMaster(@Assisted final Configuration conf, + final RpcServerFactory rpcServerFactory, + final SleeperFactory sleeperFactory, + final ZooKeeperWatcherFactory zooKeeperWatcherFactory, + final MasterMetricsFactory masterMetricsFactory, + final ClusterStatusTrackerFactory clusterStatusTrackerFactory, + final ActiveMasterManagerFactory activeMasterManagerFactory, + final CatalogTrackerFactory catalogTrackerFactory, + final AssignmentManagerFactory assignmentManagerFactory, + final RegionServerTrackerFactory regionServerTrackerFactory, + final DrainingServerTrackerFactory drainingServerTrackerFactory, + final LoadBalancerFactory loadBalancerFactory, + final MasterFileSystemFactory masterFileSystemFactory, + final FSTableDescriptorsFactory fsTableDescriptorsFactory, + final ExecutorServiceFactory executorServiceFactory, + final ServerManagerFactory serverManagerFactory, + final MasterCoprocessorHostFactory masterCoprocessorHostFactory, + final CatalogJanitorFactory catalogJanitorFactory, + final MBeanSource mBeanSource) + throws IOException, KeeperException, InterruptedException { + //Store the factories that might be used later. 
+ this.clusterStatusTrackerFactory = clusterStatusTrackerFactory; + this.activeMasterManagerFactory = activeMasterManagerFactory; + this.catalogTrackerFactory = catalogTrackerFactory; + this.assignmentManagerFactory = assignmentManagerFactory; + this.regionServerTrackerFactory = regionServerTrackerFactory; + this.drainingServerTrackerFactory = drainingServerTrackerFactory; + this.loadBalancerFactory = loadBalancerFactory; + this.masterFileSystemFactory = masterFileSystemFactory; + this.fsTableDescriptorsFactory = fsTableDescriptorsFactory; + this.executorServiceFactory = executorServiceFactory; + this.serverManagerFactory = serverManagerFactory; + this.masterCoprocessorHostFactory = masterCoprocessorHostFactory; + this.catalogJanitorFactory = catalogJanitorFactory; + this.mBeanSource = mBeanSource; + this.conf = new Configuration(conf); + this.stopSleeper = sleeperFactory.create(100, this); // Disable the block cache on the master this.conf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0.0f); // Set how many times to retry talking to another server over HConnection. @@ -334,9 +392,10 @@ Server { } int numHandlers = conf.getInt("hbase.master.handler.count", conf.getInt("hbase.regionserver.handler.count", 25)); - this.rpcServer = HBaseRPC.getServer(MasterMonitorProtocol.class, this, - new Class[]{MasterMonitorProtocol.class, - MasterAdminProtocol.class, RegionServerStatusProtocol.class}, + + this.rpcServer = rpcServerFactory.create(this, + new Class[]{MasterMonitorProtocol.class, + MasterAdminProtocol.class, RegionServerStatusProtocol.class}, initialIsa.getHostName(), // BindAddress is IP we got for this server. 
initialIsa.getPort(), numHandlers, @@ -365,9 +424,9 @@ Server { this.conf.set("mapred.task.id", "hb_m_" + this.serverName.toString()); } - this.zooKeeper = new ZooKeeperWatcher(conf, MASTER + ":" + isa.getPort(), this, true); + this.zooKeeper = zooKeeperWatcherFactory.create(this.conf, MASTER + ":" + isa.getPort(), this, true); this.rpcServer.startThreads(); - this.metrics = new MasterMetrics(getServerName().toString()); + this.metrics = masterMetricsFactory.create(getServerName().toString()); // metrics interval: using the same property as region server. this.msgInterval = conf.getInt("hbase.regionserver.msginterval", 3 * 1000); @@ -483,15 +542,14 @@ Server { throws InterruptedException { // TODO: This is wrong!!!! Should have new servername if we restart ourselves, // if we come back to life. - this.activeMasterManager = new ActiveMasterManager(zooKeeper, this.serverName, - this); + this.activeMasterManager = activeMasterManagerFactory.create(zooKeeper, this.serverName, this); this.zooKeeper.registerListener(activeMasterManager); stallIfBackupMaster(this.conf, this.activeMasterManager); // The ClusterStatusTracker is setup before the other // ZKBasedSystemTrackers because it's needed by the activeMasterManager // to check if the cluster should be shutdown. 
- this.clusterStatusTracker = new ClusterStatusTracker(getZooKeeper(), this); + this.clusterStatusTracker = clusterStatusTrackerFactory.create(getZooKeeper(), this); this.clusterStatusTracker.start(); return this.activeMasterManager.blockUntilBecomingActiveMaster(startupStatus, this.clusterStatusTracker); @@ -504,20 +562,21 @@ Server { */ private void initializeZKBasedSystemTrackers() throws IOException, InterruptedException, KeeperException { - this.catalogTracker = createCatalogTracker(this.zooKeeper, this.conf, + this.catalogTracker = catalogTrackerFactory.create(this.zooKeeper, this.conf, this, conf.getInt("hbase.master.catalog.timeout", Integer.MAX_VALUE)); this.catalogTracker.start(); - this.balancer = LoadBalancerFactory.getLoadBalancer(conf); - this.assignmentManager = new AssignmentManager(this, serverManager, - this.catalogTracker, this.balancer, this.executorService, this.metrics); + this.balancer = loadBalancerFactory.create(); + this.assignmentManager = assignmentManagerFactory.create(this, serverManager, + this.catalogTracker, this.balancer, this.executorService, this.metrics); + zooKeeper.registerListenerFirst(assignmentManager); - this.regionServerTracker = new RegionServerTracker(zooKeeper, this, + this.regionServerTracker = regionServerTrackerFactory.create(zooKeeper, this, this.serverManager); this.regionServerTracker.start(); - this.drainingServerTracker = new DrainingServerTracker(zooKeeper, this, + this.drainingServerTracker = drainingServerTrackerFactory.create(zooKeeper, this, this.serverManager); this.drainingServerTracker.start(); @@ -532,27 +591,8 @@ Server { ", cluster-up flag was=" + wasUp); } - /** - * Create CatalogTracker. - * In its own method so can intercept and mock it over in tests. - * @param zk If zk is null, we'll create an instance (and shut it down - * when {@link #stop()} is called) else we'll use what is passed. - * @param conf - * @param abortable If fatal exception we'll call abort on this. May be null. 
- * If it is we'll use the Connection associated with the passed - * {@link Configuration} as our {@link Abortable}. - * @param defaultTimeout Timeout to use. Pass zero for no timeout - * ({@link Object#wait(long)} when passed a 0 waits for ever). - * @throws IOException - */ - CatalogTracker createCatalogTracker(final ZooKeeperWatcher zk, - final Configuration conf, Abortable abortable, final int defaultTimeout) - throws IOException { - return new CatalogTracker(zk, conf, abortable, defaultTimeout); - } - // Check if we should stop every 100ms - private Sleeper stopSleeper = new Sleeper(100, this); + private final Sleeper stopSleeper; private void loop() { long lastMsgTs = 0l; @@ -613,20 +653,19 @@ Server { status.setStatus("Initializing Master file system"); this.masterActiveTime = System.currentTimeMillis(); - // TODO: Do this using Dependency Injection, using PicoContainer, Guice or Spring. - this.fileSystemManager = new MasterFileSystem(this, this, metrics, masterRecovery); + this.fileSystemManager = masterFileSystemFactory.create(this.conf, this, this, metrics, masterRecovery); this.tableDescriptors = - new FSTableDescriptors(this.fileSystemManager.getFileSystem(), - this.fileSystemManager.getRootDir()); + fsTableDescriptorsFactory.create(this.fileSystemManager.getFileSystem(), + this.fileSystemManager.getRootDir(), false /*ReadOnly = false*/); // publish cluster ID status.setStatus("Publishing Cluster ID in ZooKeeper"); ZKClusterId.setClusterId(this.zooKeeper, fileSystemManager.getClusterId()); if (!masterRecovery) { - this.executorService = new ExecutorService(getServerName().toString()); - this.serverManager = createServerManager(this, this); + this.executorService = executorServiceFactory.create(getServerName().toString()); + this.serverManager = serverManagerFactory.create(this, this, true /* connect */ ); } status.setStatus("Initializing ZK system trackers"); @@ -635,7 +674,7 @@ Server { if (!masterRecovery) { // initialize master side coprocessors 
before we start handling requests status.setStatus("Initializing master coprocessors"); - this.cpHost = new MasterCoprocessorHost(this, this.conf); + this.cpHost = masterCoprocessorHostFactory.create(this); // start up all service threads. status.setStatus("Initializing master service threads"); @@ -688,7 +727,7 @@ Server { // been assigned. status.setStatus("Starting balancer and catalog janitor"); this.balancerChore = getAndStartBalancerChore(this); - this.catalogJanitorChore = new CatalogJanitor(this, this); + this.catalogJanitorChore = catalogJanitorFactory.create(this, this); startCatalogJanitorChore(); registerMBean(); @@ -731,22 +770,6 @@ Server { } /** - * Create a {@link ServerManager} instance. - * @param master - * @param services - * @return An instance of {@link ServerManager} - * @throws ZooKeeperConnectionException - * @throws IOException - */ - ServerManager createServerManager(final Server master, - final MasterServices services) - throws IOException { - // We put this out here in a method so can do a Mockito.spy and stub it out - // w/ a mocked up ServerManager. - return new ServerManager(master, services); - } - - /** * If ServerShutdownHandler is disabled, we enable it and expire those dead * but not expired servers. * @throws IOException @@ -1135,7 +1158,7 @@ Server { */ protected RegionServerStartupResponse.Builder createConfigurationSubset() { RegionServerStartupResponse.Builder resp = addConfig( - RegionServerStartupResponse.newBuilder(), HConstants.HBASE_DIR); + RegionServerStartupResponse.newBuilder(), HConstants.HBASE_DIR); return addConfig(resp, "fs.default.name"); } @@ -2222,31 +2245,6 @@ Server { } /** - * Utility for constructing an instance of the passed HMaster class. - * @param masterClass - * @param conf - * @return HMaster instance. 
- */ - public static HMaster constructMaster(Class masterClass, - final Configuration conf) { - try { - Constructor c = - masterClass.getConstructor(Configuration.class); - return c.newInstance(conf); - } catch (InvocationTargetException ite) { - Throwable target = ite.getTargetException() != null? - ite.getTargetException(): ite; - if (target.getCause() != null) target = target.getCause(); - throw new RuntimeException("Failed construction of Master: " + - masterClass.toString(), target); - } catch (Exception e) { - throw new RuntimeException("Failed construction of Master: " + - masterClass.toString() + ((e.getCause() != null)? - e.getCause().getMessage(): ""), e); - } - } - - /** * @see org.apache.hadoop.hbase.master.HMasterCommandLine */ public static void main(String [] args) throws Exception { @@ -2259,8 +2257,7 @@ Server { */ void registerMBean() { MXBeanImpl mxBeanInfo = MXBeanImpl.init(this); - mxBean = CompatibilitySingletonFactory.getInstance( - MBeanSource.class).register("hbase", "HMaster,sub=MXBean", mxBeanInfo); + mxBean = mBeanSource.register("hbase", "HMaster,sub=MXBean", mxBeanInfo); LOG.info("Registered HMaster MXBean"); } diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMasterCommandLine.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMasterCommandLine.java index 16a3cd8..6ce73b5 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMasterCommandLine.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMasterCommandLine.java @@ -23,6 +23,7 @@ import java.io.File; import java.io.IOException; import java.util.List; +import com.google.inject.Injector; import org.apache.commons.cli.CommandLine; import org.apache.commons.cli.GnuParser; import org.apache.commons.cli.Options; @@ -37,11 +38,11 @@ import org.apache.hadoop.hbase.LocalHBaseCluster; import org.apache.hadoop.hbase.MasterNotRunningException; import org.apache.hadoop.hbase.ZooKeeperConnectionException; import 
org.apache.hadoop.hbase.client.HBaseAdmin; -import org.apache.hadoop.hbase.regionserver.HRegionServer; +import org.apache.hadoop.hbase.HBaseGuice; +import org.apache.hadoop.hbase.LocalHBaseClusterFactory; import org.apache.hadoop.hbase.util.JVMClusterUtil; import org.apache.hadoop.hbase.util.ServerCommandLine; import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster; -import org.apache.zookeeper.KeeperException; @InterfaceAudience.Private public class HMasterCommandLine extends ServerCommandLine { @@ -143,13 +144,15 @@ public class HMasterCommandLine extends ServerCommandLine { Integer.toString(clientPort)); // Need to have the zk cluster shutdown when master is shutdown. // Run a subclass that does the zk cluster shutdown on its way out. - LocalHBaseCluster cluster = new LocalHBaseCluster(conf, 1, 1, - LocalHMaster.class, HRegionServer.class); + Injector injector = HBaseGuice.createLocalHBaseInjector(); + LocalHBaseCluster cluster = injector.getInstance(LocalHBaseClusterFactory.class).create(conf, 1,1); + ((LocalHMaster)cluster.getMaster(0)).setZKCluster(zooKeeperCluster); cluster.startup(); waitOnMasterThreads(cluster); } else { - HMaster master = HMaster.constructMaster(masterClass, conf); + Injector injector = HBaseGuice.createInjector(); + HMaster master = injector.getInstance(HMasterFactory.class).create(conf); if (master.isStopped()) { LOG.info("Won't bring the Master up as a shutdown is requested"); return -1; @@ -209,32 +212,4 @@ public class HMasterCommandLine extends ServerCommandLine { t.getRegionServer().stop("HMaster Aborted; Bringing down regions servers"); } } - - /* - * Version of master that will shutdown the passed zk cluster on its way out. 
- */ - public static class LocalHMaster extends HMaster { - private MiniZooKeeperCluster zkcluster = null; - - public LocalHMaster(Configuration conf) - throws IOException, KeeperException, InterruptedException { - super(conf); - } - - @Override - public void run() { - super.run(); - if (this.zkcluster != null) { - try { - this.zkcluster.shutdown(); - } catch (IOException e) { - e.printStackTrace(); - } - } - } - - void setZKCluster(final MiniZooKeeperCluster zkcluster) { - this.zkcluster = zkcluster; - } - } } diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMasterFactory.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMasterFactory.java new file mode 100644 index 0000000..5be259b --- /dev/null +++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMasterFactory.java @@ -0,0 +1,15 @@ +package org.apache.hadoop.hbase.master; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.conf.Configuration; + +/** + * Interface of a Factory that can construct a HMaster. + * This will be used by guice in DI. + */ +@InterfaceAudience.Private +public interface HMasterFactory { + + public HMaster create(Configuration conf); + +} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMasterModule.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMasterModule.java new file mode 100644 index 0000000..8623b85 --- /dev/null +++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMasterModule.java @@ -0,0 +1,35 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.master; + +import com.google.inject.AbstractModule; +import com.google.inject.assistedinject.FactoryModuleBuilder; + +/** + * + */ +public class HMasterModule extends AbstractModule { + + @Override + protected void configure() { + install(new FactoryModuleBuilder() + .implement(HMaster.class, HMaster.class) + .build(HMasterFactory.class)); + } +} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/LocalHMaster.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/LocalHMaster.java new file mode 100644 index 0000000..caff548 --- /dev/null +++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/LocalHMaster.java @@ -0,0 +1,110 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.master; + +import com.google.inject.assistedinject.Assisted; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.catalog.CatalogTrackerFactory; +import org.apache.hadoop.hbase.executor.ExecutorServiceFactory; +import org.apache.hadoop.hbase.ipc.RpcServerFactory; +import org.apache.hadoop.hbase.master.balancer.LoadBalancerFactory; +import org.apache.hadoop.hbase.master.metrics.MasterMetricsFactory; +import org.apache.hadoop.hbase.metrics.MBeanSource; +import org.apache.hadoop.hbase.util.FSTableDescriptorsFactory; +import org.apache.hadoop.hbase.util.SleeperFactory; +import org.apache.hadoop.hbase.zookeeper.ClusterStatusTrackerFactory; +import org.apache.hadoop.hbase.zookeeper.DrainingServerTrackerFactory; +import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster; +import org.apache.hadoop.hbase.zookeeper.RegionServerTrackerFactory; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcherFactory; +import org.apache.zookeeper.KeeperException; + +import javax.inject.Inject; +import java.io.IOException; + +/** + * + */ +/* +* Version of master that will shutdown the passed zk cluster on its way out. 
+*/ +@InterfaceAudience.Private +public class LocalHMaster extends HMaster { + + private MiniZooKeeperCluster zkcluster = null; + + @Inject + public LocalHMaster(@Assisted final Configuration conf, + final RpcServerFactory rpcServerFactory, + final SleeperFactory sleeperFactory, + final ZooKeeperWatcherFactory zooKeeperWatcherFactory, + final MasterMetricsFactory masterMetricsFactory, + final ClusterStatusTrackerFactory clusterStatusTrackerFactory, + final ActiveMasterManagerFactory activeMasterManagerFactory, + final CatalogTrackerFactory catalogTrackerFactory, + final AssignmentManagerFactory assignmentManagerFactory, + final RegionServerTrackerFactory regionServerTrackerFactory, + final DrainingServerTrackerFactory drainingServerTrackerFactory, + final LoadBalancerFactory loadBalancerFactory, + final MasterFileSystemFactory masterFileSystemFactory, + final FSTableDescriptorsFactory fsTableDescriptorsFactory, + final ExecutorServiceFactory executorServiceFactory, + final ServerManagerFactory serverManagerFactory, + final MasterCoprocessorHostFactory masterCoprocessorHostFactory, + final CatalogJanitorFactory catalogJanitorFactory, + final MBeanSource mBeanSource) + throws IOException, KeeperException, InterruptedException { + super(conf, + rpcServerFactory, + sleeperFactory, + zooKeeperWatcherFactory, + masterMetricsFactory, + clusterStatusTrackerFactory, + activeMasterManagerFactory, + catalogTrackerFactory, + assignmentManagerFactory, + regionServerTrackerFactory, + drainingServerTrackerFactory, + loadBalancerFactory, + masterFileSystemFactory, + fsTableDescriptorsFactory, + executorServiceFactory, + serverManagerFactory, + masterCoprocessorHostFactory, + catalogJanitorFactory, + mBeanSource); + } + + @Override + public void run() { + super.run(); + if (this.zkcluster != null) { + try { + this.zkcluster.shutdown(); + } catch (IOException e) { + e.printStackTrace(); + } + } + } + + void setZKCluster(final MiniZooKeeperCluster zkcluster) { + this.zkcluster = 
zkcluster; + } +} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/LocalHMasterModule.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/LocalHMasterModule.java new file mode 100644 index 0000000..15c83be --- /dev/null +++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/LocalHMasterModule.java @@ -0,0 +1,35 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.master; + +import com.google.inject.AbstractModule; +import com.google.inject.assistedinject.FactoryModuleBuilder; + +/** + * + */ +public class LocalHMasterModule extends AbstractModule { + + @Override + protected void configure() { + install(new FactoryModuleBuilder() + .implement(HMaster.class, LocalHMaster.class) + .build(HMasterFactory.class)); + } +} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java index 20f82e1..c38339c 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java @@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.master; +import com.google.inject.assistedinject.Assisted; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; @@ -27,6 +28,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.*; import org.apache.hadoop.hbase.coprocessor.*; +import javax.inject.Inject; import java.io.IOException; /** @@ -62,7 +64,9 @@ public class MasterCoprocessorHost private MasterServices masterServices; - MasterCoprocessorHost(final MasterServices services, final Configuration conf) { + @Inject + MasterCoprocessorHost(@Assisted final MasterServices services, + final Configuration conf) { this.masterServices = services; loadSystemCoprocessors(conf, MASTER_COPROCESSOR_CONF_KEY); } diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHostFactory.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHostFactory.java new file mode 100644 index 0000000..9e9eded --- /dev/null +++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHostFactory.java @@ -0,0 +1,32 @@ +/** + * 
Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.master; + +import org.apache.hadoop.classification.InterfaceAudience; + +/** + * Interface of a Factory that can construct a MasterCoprocessorHost. + * This will be used by guice in DI. + */ +@InterfaceAudience.Private +public interface MasterCoprocessorHostFactory { + + public MasterCoprocessorHost create(final MasterServices services); + +} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHostModule.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHostModule.java new file mode 100644 index 0000000..d2db185 --- /dev/null +++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHostModule.java @@ -0,0 +1,35 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.master; + +import com.google.inject.AbstractModule; +import com.google.inject.assistedinject.FactoryModuleBuilder; + +/** + * + */ +public class MasterCoprocessorHostModule extends AbstractModule { + + @Override + protected void configure() { + install(new FactoryModuleBuilder() + .implement(MasterCoprocessorHost.class, MasterCoprocessorHost.class) + .build(MasterCoprocessorHostFactory.class)); + } +} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java index d9e0d01..00e03fc 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java @@ -27,6 +27,7 @@ import java.util.UUID; import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; +import com.google.inject.assistedinject.Assisted; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; @@ -56,6 +57,8 @@ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.FSTableDescriptors; import org.apache.hadoop.hbase.util.FSUtils; +import javax.inject.Inject; + /** * This class abstracts a bunch of operations the HMaster needs to interact with * the underlying file system, including splitting log files, checking file @@ -86,10 +89,14 @@ public class MasterFileSystem { final 
SplitLogManager splitLogManager; private final MasterServices services; - public MasterFileSystem(Server master, MasterServices services, - MasterMetrics metrics, boolean masterRecovery) - throws IOException { - this.conf = master.getConfiguration(); + @Inject + public MasterFileSystem(@Assisted Configuration conf, + @Assisted Server master, + @Assisted MasterServices services, + @Assisted MasterMetrics metrics, + @Assisted boolean masterRecovery) + throws IOException { + this.conf = conf; this.master = master; this.services = services; this.metrics = metrics; diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystemFactory.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystemFactory.java new file mode 100644 index 0000000..e075f39 --- /dev/null +++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystemFactory.java @@ -0,0 +1,40 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.master; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.Server; +import org.apache.hadoop.hbase.master.metrics.MasterMetrics; + +/** + * Interface of a Factory that can construct a MasterFileSystem. + * This will be used by guice in DI. + */ +@InterfaceAudience.Private +public interface MasterFileSystemFactory { + + public MasterFileSystem create( + Configuration conf, + Server master, + MasterServices services, + MasterMetrics metrics, + boolean masterRecovery); + +} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystemModule.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystemModule.java new file mode 100644 index 0000000..65365a5 --- /dev/null +++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystemModule.java @@ -0,0 +1,35 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.master; + +import com.google.inject.AbstractModule; +import com.google.inject.assistedinject.FactoryModuleBuilder; + +/** + * + */ +public class MasterFileSystemModule extends AbstractModule { + + @Override + protected void configure() { + install(new FactoryModuleBuilder() + .implement(MasterFileSystem.class, MasterFileSystem.class) + .build(MasterFileSystemFactory.class)); + } +} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java index 6939d8e..4d3393c 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java @@ -31,6 +31,7 @@ import java.util.Map; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; +import com.google.inject.assistedinject.Assisted; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; @@ -60,6 +61,8 @@ import org.apache.hadoop.hbase.regionserver.RegionOpeningState; import com.google.protobuf.ServiceException; +import javax.inject.Inject; + /** * The ServerManager class manages info about region servers. *

@@ -126,8 +129,9 @@ public class ServerManager { this(master, services, true); } - ServerManager(final Server master, final MasterServices services, - final boolean connect) throws ZooKeeperConnectionException { + @Inject + ServerManager(@Assisted final Server master, @Assisted final MasterServices services, + @Assisted final boolean connect) throws ZooKeeperConnectionException { this.master = master; this.services = services; Configuration c = master.getConfiguration(); diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManagerFactory.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManagerFactory.java new file mode 100644 index 0000000..6b51242 --- /dev/null +++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManagerFactory.java @@ -0,0 +1,38 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.master; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.hbase.Server; +import org.apache.hadoop.hbase.ZooKeeperConnectionException; + +import java.io.IOException; + +/** + * Interface of a Factory that can construct a ServerManager. + * This will be used by guice in DI. 
+ */ +@InterfaceAudience.Private +public interface ServerManagerFactory { + + public ServerManager create(final Server master, + final MasterServices services, + final boolean connect) throws IOException; + +} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManagerModule.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManagerModule.java new file mode 100644 index 0000000..ef55c3f --- /dev/null +++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManagerModule.java @@ -0,0 +1,35 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.master; + +import com.google.inject.AbstractModule; +import com.google.inject.assistedinject.FactoryModuleBuilder; + +/** + * + */ +public class ServerManagerModule extends AbstractModule { + + @Override + protected void configure() { + install(new FactoryModuleBuilder() + .implement(ServerManager.class, ServerManager.class) + .build(ServerManagerFactory.class)); + } +} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/LoadBalancerFactory.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/LoadBalancerFactory.java index 68a0887..6de1b73 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/LoadBalancerFactory.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/LoadBalancerFactory.java @@ -1,46 +1,14 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ package org.apache.hadoop.hbase.master.balancer; import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.master.LoadBalancer; -import org.apache.hadoop.util.ReflectionUtils; /** - * The class that creates a load balancer from a conf. + * Interface of a Factory that can construct a LoadBalancer. + * This will be used by guice in DI. */ @InterfaceAudience.Private -public class LoadBalancerFactory { - - /** - * Create a loadblanacer from the given conf. - * @param conf - * @return A {@link LoadBalancer} - */ - public static LoadBalancer getLoadBalancer(Configuration conf) { - - // Create the balancer - Class balancerKlass = - conf.getClass(HConstants.HBASE_MASTER_LOADBALANCER_CLASS, DefaultLoadBalancer.class, - LoadBalancer.class); - return ReflectionUtils.newInstance(balancerKlass, conf); +public interface LoadBalancerFactory { - } + LoadBalancer create(); } diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/LoadBalancerFactoryImpl.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/LoadBalancerFactoryImpl.java new file mode 100644 index 0000000..63af771 --- /dev/null +++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/LoadBalancerFactoryImpl.java @@ -0,0 +1,57 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.master.balancer; + +import com.google.inject.Injector; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.master.LoadBalancer; + +import javax.inject.Inject; + +/** The class that creates a load balancer from a conf. */ +@InterfaceAudience.Private +public class LoadBalancerFactoryImpl implements LoadBalancerFactory { + + Injector injector; + Configuration conf; + + @Inject + public LoadBalancerFactoryImpl(Configuration conf, Injector injector) { + this.conf = conf; + // Injecting the injector is hacky but it's the only way that I can see to still have classes + // loaded from the conf and use dependency injection. If we ever go to a fully plugin arch + // then we should re-look at this. + this.injector = injector; + } + + /** Create a load balancer from the given conf. 
*/ + @Override + public LoadBalancer create() { + + // Create the balancer + Class balancerKlass = + conf.getClass(HConstants.HBASE_MASTER_LOADBALANCER_CLASS, DefaultLoadBalancer.class, + LoadBalancer.class); + LoadBalancer lb = injector.getInstance(balancerKlass); + lb.setConf(conf); + return lb; + + } +} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/LoadBalancerModule.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/LoadBalancerModule.java new file mode 100644 index 0000000..7315217 --- /dev/null +++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/LoadBalancerModule.java @@ -0,0 +1,14 @@ +package org.apache.hadoop.hbase.master.balancer; + +import com.google.inject.AbstractModule; + +/** + * + */ +public class LoadBalancerModule extends AbstractModule { + + @Override + protected void configure() { + bind(LoadBalancerFactory.class).to(LoadBalancerFactoryImpl.class); + } +} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetrics.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetrics.java index 8a575dc..ef05a62 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetrics.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetrics.java @@ -21,7 +21,10 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hbase.CompatibilitySingletonFactory; + +import com.google.inject.assistedinject.Assisted; + +import javax.inject.Inject; /** * This class is for maintaining the various master statistics @@ -36,8 +39,10 @@ public class MasterMetrics { private final Log LOG = LogFactory.getLog(this.getClass()); private MasterMetricsSource masterMetricsSource; - public MasterMetrics(final String name) { - 
masterMetricsSource = CompatibilitySingletonFactory.getInstance(MasterMetricsSource.class); + + @Inject + public MasterMetrics(MasterMetricsSource mms, @Assisted final String name) { + masterMetricsSource = mms; } // for unit-test usage diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetricsFactory.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetricsFactory.java new file mode 100644 index 0000000..64517c2 --- /dev/null +++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetricsFactory.java @@ -0,0 +1,32 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.master.metrics; + +import org.apache.hadoop.classification.InterfaceAudience; + +/** + * Interface of a Factory that can construct a MasterMetrics. + * This will be used by guice in DI. 
+ */ +@InterfaceAudience.Private +public interface MasterMetricsFactory { + + public MasterMetrics create(String name); + +} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetricsModule.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetricsModule.java new file mode 100644 index 0000000..d8fa8a7 --- /dev/null +++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetricsModule.java @@ -0,0 +1,34 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.master.metrics; + +import com.google.inject.AbstractModule; +import com.google.inject.assistedinject.FactoryModuleBuilder; + +/** + * + */ +public class MasterMetricsModule extends AbstractModule { + + @Override + protected void configure() { + install(new FactoryModuleBuilder().implement(MasterMetrics.class, MasterMetrics.class) + .build(MasterMetricsFactory.class)); + } +} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java index 799a64a..039c444 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java @@ -29,6 +29,7 @@ import java.util.concurrent.ThreadFactory; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; +import com.google.inject.assistedinject.Assisted; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; @@ -37,6 +38,8 @@ import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest; import com.google.common.base.Preconditions; +import javax.inject.Inject; + /** * Compact region on request and then run split if appropriate */ @@ -59,7 +62,8 @@ public class CompactSplitThread implements CompactionRequestor { private int regionSplitLimit; /** @param server */ - CompactSplitThread(HRegionServer server) { + @Inject + CompactSplitThread(@Assisted HRegionServer server) { super(); this.server = server; this.conf = server.getConfiguration(); diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThreadFactory.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThreadFactory.java new file mode 100644 index 0000000..d2e78fd --- /dev/null +++ 
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThreadFactory.java @@ -0,0 +1,32 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.regionserver; + +import org.apache.hadoop.classification.InterfaceAudience; + +/** + * Interface of a Factory that can construct a CompactSplitThread. + * This will be used by guice in DI. + */ +@InterfaceAudience.Private +public interface CompactSplitThreadFactory { + + public CompactSplitThread create(HRegionServer server); + +} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThreadModule.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThreadModule.java new file mode 100644 index 0000000..a703c5d --- /dev/null +++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThreadModule.java @@ -0,0 +1,35 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.regionserver; + +import com.google.inject.AbstractModule; +import com.google.inject.assistedinject.FactoryModuleBuilder; + +/** + * + */ +public class CompactSplitThreadModule extends AbstractModule { + + @Override + protected void configure() { + install(new FactoryModuleBuilder() + .implement(CompactSplitThread.class, CompactSplitThread.class) + .build(CompactSplitThreadFactory.class)); + } +} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionChecker.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionChecker.java new file mode 100644 index 0000000..a2de4ef --- /dev/null +++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionChecker.java @@ -0,0 +1,87 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.regionserver; + +import com.google.inject.assistedinject.Assisted; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.Chore; +import org.apache.hadoop.hbase.Stoppable; +import org.apache.hadoop.util.StringUtils; + +import javax.inject.Inject; +import java.io.IOException; + +/** + * + */ +/* +* Inner class that runs on a long period checking if regions need compaction. +*/ +public class CompactionChecker extends Chore { + + public static final Log LOG = LogFactory.getLog(CompactionChecker.class); + + private final HRegionServer instance; + private final int majorCompactPriority; + private final static int DEFAULT_PRIORITY = Integer.MAX_VALUE; + + @Inject + CompactionChecker(@Assisted final HRegionServer h, @Assisted final int sleepTime) { + super("CompactionChecker", sleepTime, h); + this.instance = h; + LOG.info("Runs every " + StringUtils.formatTime(sleepTime)); + + /* MajorCompactPriority is configurable. + * If not set, the compaction will use default priority. + */ + this.majorCompactPriority = this.instance.conf. + getInt("hbase.regionserver.compactionChecker.majorCompactPriority", + DEFAULT_PRIORITY); + } + + @Override + protected void chore() { + for (HRegion r : this.instance.onlineRegions.values()) { + if (r == null) + continue; + for (Store s : r.getStores().values()) { + try { + if (s.needsCompaction()) { + // Queue a compaction. Will recognize if major is needed. 
+ this.instance.compactSplitThread.requestCompaction(r, s, + getName() + " requests compaction"); + } else if (s.isMajorCompaction()) { + if (majorCompactPriority == DEFAULT_PRIORITY || + majorCompactPriority > r.getCompactPriority()) { + this.instance.compactSplitThread.requestCompaction(r, s, + getName() + " requests major compaction; use default priority"); + } else { + this.instance.compactSplitThread.requestCompaction(r, s, + getName() + " requests major compaction; use configured priority", + this.majorCompactPriority); + } + } + } catch (IOException e) { + LOG.warn("Failed major compaction check on " + r, e); + } + } + } + } +} \ No newline at end of file diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionCheckerFactory.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionCheckerFactory.java new file mode 100644 index 0000000..53dffc3 --- /dev/null +++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionCheckerFactory.java @@ -0,0 +1,32 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.regionserver; + +import org.apache.hadoop.classification.InterfaceAudience; + +/** + * Interface of a Factory that can construct a CompactionChecker. + * This will be used by guice in DI. + */ +@InterfaceAudience.Private +public interface CompactionCheckerFactory { + + public CompactionChecker create(final HRegionServer hrs, final int sleepTime); + +} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionCheckerModule.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionCheckerModule.java new file mode 100644 index 0000000..62d61c8 --- /dev/null +++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionCheckerModule.java @@ -0,0 +1,35 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.regionserver; + +import com.google.inject.AbstractModule; +import com.google.inject.assistedinject.FactoryModuleBuilder; + +/** + * + */ +public class CompactionCheckerModule extends AbstractModule { + + @Override + protected void configure() { + install(new FactoryModuleBuilder() + .implement(CompactionChecker.class, CompactionChecker.class) + .build(CompactionCheckerFactory.class)); + } +} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java index ce945ac..6ba031d 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java @@ -51,8 +51,10 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.locks.ReentrantReadWriteLock; +import javax.inject.Inject; import javax.management.ObjectName; +import com.google.inject.assistedinject.Assisted; import org.apache.commons.lang.mutable.MutableDouble; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -104,6 +106,13 @@ import org.apache.hadoop.hbase.executor.ExecutorService.ExecutorType; import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp; import org.apache.hadoop.hbase.filter.WritableByteArrayComparable; import org.apache.hadoop.hbase.fs.HFileSystem; +import org.apache.hadoop.hbase.catalog.CatalogTrackerFactory; +import org.apache.hadoop.hbase.zookeeper.ClusterStatusTrackerFactory; +import org.apache.hadoop.hbase.zookeeper.MasterAddressTrackerFactory; +import org.apache.hadoop.hbase.replication.regionserver.ReplicationFactory; +import org.apache.hadoop.hbase.ipc.RpcServerFactory; +import org.apache.hadoop.hbase.util.SleeperFactory; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcherFactory; import 
org.apache.hadoop.hbase.io.hfile.BlockCache; import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.io.hfile.CacheStats; @@ -436,6 +445,19 @@ public class HRegionServer implements ClientProtocol, */ private final int scannerLeaseTimeoutPeriod; + /** + * Factories to be used later when initializing the HRegionServer. + */ + private final ReplicationFactory replicationFactory; + private final ZooKeeperWatcherFactory zooKeeperWatcherFactory; + private final MasterAddressTrackerFactory masterAddressTrackerFactory; + private final ClusterStatusTrackerFactory clusterStatusTrackerFactory; + private final CatalogTrackerFactory catalogTrackerFactory; + private final MemStoreFlusherFactory memStoreFlusherFactory; + private final CompactSplitThreadFactory compactSplitThreadFactory; + private final CompactionCheckerFactory compactionCheckerFactory; + private final LeasesFactory leasesFactory; + private final HRegionThriftServerFactory hRegionThriftServerFactory; /** * Starts a HRegionServer at the default location @@ -444,8 +466,32 @@ public class HRegionServer implements ClientProtocol, * @throws IOException * @throws InterruptedException */ - public HRegionServer(Configuration conf) - throws IOException, InterruptedException { + @Inject + public HRegionServer(@Assisted Configuration conf, + SleeperFactory sleeperFactory, + RpcServerFactory rpcServerFactory, + ReplicationFactory replicationFactory, + RegionServerAccounting rsAccounting, + ZooKeeperWatcherFactory zooKeeperWatcherFactory, + MasterAddressTrackerFactory masterAddressTrackerFactory, + ClusterStatusTrackerFactory clusterStatusTrackerFactory, + CatalogTrackerFactory catalogTrackerFactory, + MemStoreFlusherFactory memStoreFlusherFactory, + CompactSplitThreadFactory compactSplitThreadFactory, + CompactionCheckerFactory compactionCheckerFactory, + LeasesFactory leasesFactory, + HRegionThriftServerFactory hRegionThriftServerFactory) + throws IOException, InterruptedException { + 
this.replicationFactory = replicationFactory; + this.zooKeeperWatcherFactory = zooKeeperWatcherFactory; + this.masterAddressTrackerFactory = masterAddressTrackerFactory; + this.clusterStatusTrackerFactory = clusterStatusTrackerFactory; + this.catalogTrackerFactory = catalogTrackerFactory; + this.memStoreFlusherFactory = memStoreFlusherFactory; + this.compactSplitThreadFactory = compactSplitThreadFactory; + this.compactionCheckerFactory = compactionCheckerFactory; + this.leasesFactory = leasesFactory; + this.hRegionThriftServerFactory = hRegionThriftServerFactory; this.fsOk = true; this.conf = conf; // Set how many times to retry talking to another server over HConnection. @@ -464,7 +510,7 @@ public class HRegionServer implements ClientProtocol, 10 * 1000); this.msgInterval = conf.getInt("hbase.regionserver.msginterval", 3 * 1000); - this.sleeper = new Sleeper(this.msgInterval, this); + this.sleeper = sleeperFactory.create(this.msgInterval, this); this.maxScannerResultSize = conf.getLong( HConstants.HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE_KEY, @@ -499,10 +545,12 @@ public class HRegionServer implements ClientProtocol, throw new IllegalArgumentException("Failed resolve of " + initialIsa); } - this.rpcServer = HBaseRPC.getServer(AdminProtocol.class, this, - new Class[]{ClientProtocol.class, - AdminProtocol.class, HBaseRPCErrorHandler.class, - OnlineRegions.class}, + this.rpcServer = rpcServerFactory.create(this, + new Class[]{ ClientProtocol.class, + AdminProtocol.class, + HBaseRPCErrorHandler.class, + OnlineRegions.class}, + initialIsa.getHostName(), // BindAddress is IP we got for this server. 
initialIsa.getPort(), conf.getInt("hbase.regionserver.handler.count", 10), @@ -519,8 +567,8 @@ public class HRegionServer implements ClientProtocol, // login the server principal (if using secure Hadoop) User.login(this.conf, "hbase.regionserver.keytab.file", "hbase.regionserver.kerberos.principal", this.isa.getHostName()); - regionServerAccounting = new RegionServerAccounting(); - cacheConfig = new CacheConfig(conf); + this.regionServerAccounting = rsAccounting; + this.cacheConfig = new CacheConfig(conf); } /** @@ -667,25 +715,25 @@ public class HRegionServer implements ClientProtocol, */ private void initializeZooKeeper() throws IOException, InterruptedException { // Open connection to zookeeper and set primary watcher - this.zooKeeper = new ZooKeeperWatcher(conf, REGIONSERVER + ":" + - this.isa.getPort(), this); + this.zooKeeper = zooKeeperWatcherFactory.create(this.conf, REGIONSERVER + ":" + + this.isa.getPort(), this, false); // Create the master address manager, register with zk, and start it. Then // block until a master is available. No point in starting up if no master // running. - this.masterAddressManager = new MasterAddressTracker(this.zooKeeper, this); + this.masterAddressManager = masterAddressTrackerFactory.create(this.zooKeeper, this); this.masterAddressManager.start(); blockAndCheckIfStopped(this.masterAddressManager); // Wait on cluster being up. Master will set this flag up in zookeeper // when ready. 
- this.clusterStatusTracker = new ClusterStatusTracker(this.zooKeeper, this); + this.clusterStatusTracker = clusterStatusTrackerFactory.create(this.zooKeeper, this); this.clusterStatusTracker.start(); blockAndCheckIfStopped(this.clusterStatusTracker); // Create the catalog tracker and start it; - this.catalogTracker = new CatalogTracker(this.zooKeeper, this.conf, - this, this.conf.getInt("hbase.regionserver.catalog.timeout", Integer.MAX_VALUE)); + this.catalogTracker = catalogTrackerFactory.create(this.zooKeeper, this.conf, + this, this.conf.getInt("hbase.regionserver.catalog.timeout", Integer.MAX_VALUE)); catalogTracker.start(); } @@ -714,23 +762,23 @@ public class HRegionServer implements ClientProtocol, private void initializeThreads() throws IOException { // Cache flushing thread. - this.cacheFlusher = new MemStoreFlusher(conf, this); + this.cacheFlusher = memStoreFlusherFactory.create(this.conf, this); // Compaction thread - this.compactSplitThread = new CompactSplitThread(this); + this.compactSplitThread = compactSplitThreadFactory.create(this); // Background thread to check for compactions; needed if region // has not gotten updates in a while. Make it run at a lesser frequency. int multiplier = this.conf.getInt(HConstants.THREAD_WAKE_FREQUENCY + ".multiplier", 1000); - this.compactionChecker = new CompactionChecker(this, - this.threadWakeFrequency * multiplier, this); + this.compactionChecker = compactionCheckerFactory.create(this, + this.threadWakeFrequency * multiplier); - this.leases = new Leases(this.threadWakeFrequency); + this.leases = leasesFactory.create(this.threadWakeFrequency); // Create the thread for the ThriftServer. 
if (conf.getBoolean("hbase.regionserver.export.thrift", false)) { - thriftServer = new HRegionThriftServer(this, conf); + thriftServer = hRegionThriftServerFactory.create(this.conf, this); thriftServer.start(); LOG.info("Started Thrift API from Region Server."); } @@ -1201,57 +1249,6 @@ public class HRegionServer implements ClientProtocol, return r != null ? createRegionLoad(r) : null; } - /* - * Inner class that runs on a long period checking if regions need compaction. - */ - private static class CompactionChecker extends Chore { - private final HRegionServer instance; - private final int majorCompactPriority; - private final static int DEFAULT_PRIORITY = Integer.MAX_VALUE; - - CompactionChecker(final HRegionServer h, final int sleepTime, - final Stoppable stopper) { - super("CompactionChecker", sleepTime, h); - this.instance = h; - LOG.info("Runs every " + StringUtils.formatTime(sleepTime)); - - /* MajorCompactPriority is configurable. - * If not set, the compaction will use default priority. - */ - this.majorCompactPriority = this.instance.conf. - getInt("hbase.regionserver.compactionChecker.majorCompactPriority", - DEFAULT_PRIORITY); - } - - @Override - protected void chore() { - for (HRegion r : this.instance.onlineRegions.values()) { - if (r == null) - continue; - for (Store s : r.getStores().values()) { - try { - if (s.needsCompaction()) { - // Queue a compaction. Will recognize if major is needed. 
- this.instance.compactSplitThread.requestCompaction(r, s, - getName() + " requests compaction"); - } else if (s.isMajorCompaction()) { - if (majorCompactPriority == DEFAULT_PRIORITY || - majorCompactPriority > r.getCompactPriority()) { - this.instance.compactSplitThread.requestCompaction(r, s, - getName() + " requests major compaction; use default priority"); - } else { - this.instance.compactSplitThread.requestCompaction(r, s, - getName() + " requests major compaction; use configured priority", - this.majorCompactPriority); - } - } - } catch (IOException e) { - LOG.warn("Failed major compaction check on " + r, e); - } - } - } - } - } /** * Report the status of the server. A server is online once all the startup is @@ -1282,7 +1279,7 @@ public class HRegionServer implements ClientProtocol, // Instantiate replication manager if replication enabled. Pass it the // log directories. - createNewReplicationInstance(conf, this, this.fs, logdir, oldLogDir); + createNewReplicationInstance(logdir, oldLogDir); return instantiateHLog(logdir, oldLogDir); } @@ -2195,39 +2192,16 @@ public class HRegionServer implements ClientProtocol, /** * Load the replication service objects, if any */ - static private void createNewReplicationInstance(Configuration conf, - HRegionServer server, FileSystem fs, Path logDir, Path oldLogDir) throws IOException{ + private void createNewReplicationInstance( Path logDir, Path oldLogDir) throws IOException{ // If replication is not enabled, then return immediately. if (!conf.getBoolean(HConstants.REPLICATION_ENABLE_KEY, false)) { return; } - // read in the name of the source replication class from the config file. 
- String sourceClassname = conf.get(HConstants.REPLICATION_SOURCE_SERVICE_CLASSNAME, - HConstants.REPLICATION_SERVICE_CLASSNAME_DEFAULT); + this.replicationSinkHandler = this.replicationFactory.create(this, fs, logDir, oldLogDir); + this.replicationSourceHandler = (ReplicationSourceService) this.replicationSinkHandler; - // read in the name of the sink replication class from the config file. - String sinkClassname = conf.get(HConstants.REPLICATION_SINK_SERVICE_CLASSNAME, - HConstants.REPLICATION_SERVICE_CLASSNAME_DEFAULT); - - // If both the sink and the source class names are the same, then instantiate - // only one object. - if (sourceClassname.equals(sinkClassname)) { - server.replicationSourceHandler = (ReplicationSourceService) - newReplicationInstance(sourceClassname, - conf, server, fs, logDir, oldLogDir); - server.replicationSinkHandler = (ReplicationSinkService) - server.replicationSourceHandler; - } - else { - server.replicationSourceHandler = (ReplicationSourceService) - newReplicationInstance(sourceClassname, - conf, server, fs, logDir, oldLogDir); - server.replicationSinkHandler = (ReplicationSinkService) - newReplicationInstance(sinkClassname, - conf, server, fs, logDir, oldLogDir); - } } static private ReplicationService newReplicationInstance(String classname, diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServerCommandLine.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServerCommandLine.java index c0cdde5..6e08aeb 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServerCommandLine.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServerCommandLine.java @@ -19,6 +19,7 @@ */ package org.apache.hadoop.hbase.regionserver; +import com.google.inject.Injector; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -26,6 +27,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import 
org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.LocalHBaseCluster; +import org.apache.hadoop.hbase.HBaseGuice; import org.apache.hadoop.hbase.util.ServerCommandLine; /** @@ -59,7 +61,8 @@ public class HRegionServerCommandLine extends ServerCommandLine { + HConstants.CLUSTER_DISTRIBUTED + " is false"); } else { logJVMInfo(); - HRegionServer hrs = HRegionServer.constructRegionServer(regionServerClass, conf); + Injector injector = HBaseGuice.createInjector(); + HRegionServer hrs = injector.getInstance(HRegionServerFactory.class).create(conf); HRegionServer.startRegionServer(hrs); } return 0; diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServerFactory.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServerFactory.java new file mode 100644 index 0000000..845154e --- /dev/null +++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServerFactory.java @@ -0,0 +1,33 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.regionserver; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.conf.Configuration; + +/** + * Interface of a Factory that can construct a HRegionServer. + * This will be used by guice in DI. + */ +@InterfaceAudience.Private +public interface HRegionServerFactory { + + public HRegionServer create(Configuration conf); + +} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServerModule.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServerModule.java new file mode 100644 index 0000000..e72ccf5 --- /dev/null +++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServerModule.java @@ -0,0 +1,35 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.regionserver; + +import com.google.inject.AbstractModule; +import com.google.inject.assistedinject.FactoryModuleBuilder; + +/** + * + */ +public class HRegionServerModule extends AbstractModule { + + @Override + protected void configure() { + install(new FactoryModuleBuilder() + .implement(HRegionServer.class, HRegionServer.class) + .build(HRegionServerFactory.class)); + } +} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionThriftServer.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionThriftServer.java index e33501e..de1cecf 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionThriftServer.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionThriftServer.java @@ -26,6 +26,7 @@ import java.nio.ByteBuffer; import java.util.List; import java.util.Map; +import com.google.inject.assistedinject.Assisted; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; @@ -42,6 +43,8 @@ import org.apache.hadoop.hbase.thrift.ThriftUtilities; import org.apache.hadoop.hbase.thrift.generated.IOError; import org.apache.hadoop.hbase.thrift.generated.TRowResult; +import javax.inject.Inject; + /** * HRegionThriftServer - this class starts up a Thrift server in the same * JVM where the RegionServer is running. 
It inherits most of the @@ -64,7 +67,8 @@ public class HRegionThriftServer extends Thread { * Create an instance of the glue object that connects the * RegionServer with the standard ThriftServer implementation */ - HRegionThriftServer(HRegionServer regionServer, Configuration conf) + @Inject + HRegionThriftServer(@Assisted Configuration conf, @Assisted HRegionServer regionServer ) throws IOException { super("Region Thrift Server"); this.rs = regionServer; diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionThriftServerFactory.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionThriftServerFactory.java new file mode 100644 index 0000000..121ea38 --- /dev/null +++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionThriftServerFactory.java @@ -0,0 +1,33 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.regionserver; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.conf.Configuration; + +/** + * Interface of a Factory that can construct a HRegionThriftServer. + * This will be used by guice in DI. 
+ */ +@InterfaceAudience.Private +public interface HRegionThriftServerFactory { + + public HRegionThriftServer create(Configuration conf, HRegionServer regionServer); + +} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionThriftServerModule.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionThriftServerModule.java new file mode 100644 index 0000000..c11e56e --- /dev/null +++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionThriftServerModule.java @@ -0,0 +1,35 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.regionserver; + +import com.google.inject.AbstractModule; +import com.google.inject.assistedinject.FactoryModuleBuilder; + +/** + * + */ +public class HRegionThriftServerModule extends AbstractModule { + + @Override + protected void configure() { + install(new FactoryModuleBuilder() + .implement(HRegionThriftServer.class, HRegionThriftServer.class) + .build(HRegionThriftServerFactory.class)); + } +} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Leases.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Leases.java index f2bd568..fa6d686 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Leases.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Leases.java @@ -19,11 +19,13 @@ */ package org.apache.hadoop.hbase.regionserver; +import com.google.inject.assistedinject.Assisted; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.hbase.util.HasThread; +import javax.inject.Inject; import java.util.ConcurrentModificationException; import java.util.HashMap; import java.util.Map; @@ -66,7 +68,8 @@ public class Leases extends HasThread { * @param leaseCheckFrequency - how often the lease should be checked * (milliseconds) */ - public Leases(final int leaseCheckFrequency) { + @Inject + public Leases(@Assisted final int leaseCheckFrequency) { this.leaseCheckFrequency = leaseCheckFrequency; setDaemon(true); } diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LeasesFactory.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LeasesFactory.java new file mode 100644 index 0000000..0fcf771 --- /dev/null +++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LeasesFactory.java @@ -0,0 +1,32 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor 
license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.regionserver; + +import org.apache.hadoop.classification.InterfaceAudience; + +/** + * Interface of a Factory that can construct a Leases instance. + * This will be used by guice in DI. + */ +@InterfaceAudience.Private +public interface LeasesFactory { + + public Leases create(final int leaseCheckFrequency); + +} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LeasesModule.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LeasesModule.java new file mode 100644 index 0000000..370f815 --- /dev/null +++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LeasesModule.java @@ -0,0 +1,35 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.regionserver; + +import com.google.inject.AbstractModule; +import com.google.inject.assistedinject.FactoryModuleBuilder; + +/** + * + */ +public class LeasesModule extends AbstractModule { + + @Override + protected void configure() { + install(new FactoryModuleBuilder() + .implement(Leases.class, Leases.class) + .build(LeasesFactory.class)); + } +} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java index cb6ed3c..51b431f 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java @@ -35,6 +35,7 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.locks.Condition; import java.util.concurrent.locks.ReentrantLock; +import com.google.inject.assistedinject.Assisted; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; @@ -48,6 +49,8 @@ import org.apache.hadoop.util.StringUtils; import com.google.common.base.Preconditions; +import javax.inject.Inject; + /** * Thread that flushes cache on request * @@ -58,7 +61,7 @@ import com.google.common.base.Preconditions; * @see FlushRequester */ @InterfaceAudience.Private -class MemStoreFlusher extends HasThread implements FlushRequester { +public class MemStoreFlusher extends HasThread implements FlushRequester { 
static final Log LOG = LogFactory.getLog(MemStoreFlusher.class); // These two data members go together. Any entry in the one must have // a corresponding entry in the other. @@ -89,8 +92,9 @@ class MemStoreFlusher extends HasThread implements FlushRequester { * @param conf * @param server */ - public MemStoreFlusher(final Configuration conf, - final HRegionServer server) { + @Inject + MemStoreFlusher(@Assisted final Configuration conf, + @Assisted final HRegionServer server) { super(); this.server = server; this.threadWakeFrequency = diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusherFactory.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusherFactory.java new file mode 100644 index 0000000..6b25831 --- /dev/null +++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusherFactory.java @@ -0,0 +1,33 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.regionserver; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.conf.Configuration; + +/** + * Interface of a Factory that can construct a MemStoreFlusher. + * This will be used by guice in DI. 
+ */ +@InterfaceAudience.Private +public interface MemStoreFlusherFactory { + + public MemStoreFlusher create(final Configuration conf, final HRegionServer server); + +} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusherModule.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusherModule.java new file mode 100644 index 0000000..0042122 --- /dev/null +++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusherModule.java @@ -0,0 +1,35 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.regionserver; + +import com.google.inject.AbstractModule; +import com.google.inject.assistedinject.FactoryModuleBuilder; + +/** + * + */ +public class MemStoreFlusherModule extends AbstractModule { + + @Override + protected void configure() { + install(new FactoryModuleBuilder() + .implement(MemStoreFlusher.class, MemStoreFlusher.class) + .build(MemStoreFlusherFactory.class)); + } +} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationZookeeper.java hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationZookeeper.java index 6eaa51f..555cebb 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationZookeeper.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationZookeeper.java @@ -32,6 +32,7 @@ import java.util.TreeMap; import java.util.TreeSet; import java.util.concurrent.atomic.AtomicBoolean; +import com.google.inject.assistedinject.Assisted; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; @@ -49,6 +50,8 @@ import org.apache.zookeeper.KeeperException; import org.apache.zookeeper.KeeperException.ConnectionLossException; import org.apache.zookeeper.KeeperException.SessionExpiredException; +import javax.inject.Inject; + /** * This class serves as a helper for all things related to zookeeper in * replication. 
@@ -139,7 +142,9 @@ public class ReplicationZookeeper implements Closeable{ * @throws IOException * @throws KeeperException */ - public ReplicationZookeeper(final Server server, final AtomicBoolean replicating) + @Inject + public ReplicationZookeeper(@Assisted final Server server, + @Assisted final AtomicBoolean replicating) throws IOException, KeeperException { this.abortable = server; this.zookeeper = server.getZooKeeper(); diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationZookeeperFactory.java hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationZookeeperFactory.java new file mode 100644 index 0000000..ac4ce57 --- /dev/null +++ hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationZookeeperFactory.java @@ -0,0 +1,38 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.replication; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.hbase.Server; +import org.apache.hadoop.hbase.replication.ReplicationZookeeper; +import org.apache.zookeeper.KeeperException; + +import java.util.concurrent.atomic.AtomicBoolean; + +/** + * Interface of a Factory that can construct a ReplicationZookeeper. + * This will be used by guice in DI. + */ +@InterfaceAudience.Private +public interface ReplicationZookeeperFactory { + + public ReplicationZookeeper create(final Server server, final AtomicBoolean replicating) throws + KeeperException; + +} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationZookeeperModule.java hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationZookeeperModule.java new file mode 100644 index 0000000..396dbb6 --- /dev/null +++ hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationZookeeperModule.java @@ -0,0 +1,35 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.replication; + +import com.google.inject.AbstractModule; +import com.google.inject.assistedinject.FactoryModuleBuilder; + +/** + * + */ +public class ReplicationZookeeperModule extends AbstractModule { + + @Override + protected void configure() { + install(new FactoryModuleBuilder() + .implement(ReplicationZookeeper.class, ReplicationZookeeper.class) + .build(ReplicationZookeeperFactory.class)); + } +} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java index 250ea86..eb21dbb 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java @@ -24,6 +24,7 @@ import java.util.NavigableMap; import java.util.TreeMap; import java.util.concurrent.atomic.AtomicBoolean; +import com.google.inject.assistedinject.Assisted; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; @@ -32,6 +33,7 @@ import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.Server; +import org.apache.hadoop.hbase.replication.ReplicationZookeeperFactory; import org.apache.hadoop.hbase.regionserver.ReplicationSourceService; import org.apache.hadoop.hbase.regionserver.ReplicationSinkService; import org.apache.hadoop.hbase.regionserver.wal.HLog; @@ -43,6 +45,8 @@ import org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner; import org.apache.hadoop.hbase.util.Bytes; import org.apache.zookeeper.KeeperException; +import javax.inject.Inject; + import static org.apache.hadoop.hbase.HConstants.HBASE_MASTER_LOGCLEANER_PLUGINS; import static 
org.apache.hadoop.hbase.HConstants.REPLICATION_ENABLE_KEY; import static org.apache.hadoop.hbase.HConstants.REPLICATION_SCOPE_LOCAL; @@ -62,6 +66,11 @@ public class Replication implements WALActionsListener, // Hosting server private Server server; + //Factories for lazy init. + private ReplicationZookeeperFactory replicationZookeeperFactory; + private ReplicationSourceManagerFactory replicationSourceManagerFactory; + private ReplicationSinkFactory replicationSinkFactory; + /** * Instantiate the replication management (if rep is enabled). * @param server Hosting server @@ -70,8 +79,17 @@ public class Replication implements WALActionsListener, * @param oldLogDir directory where logs are archived * @throws IOException */ - public Replication(final Server server, final FileSystem fs, - final Path logDir, final Path oldLogDir) throws IOException{ + @Inject + public Replication(@Assisted final Server server, + @Assisted final FileSystem fs, + @Assisted("logDir") final Path logDir, + @Assisted("oldLogDir") final Path oldLogDir, + ReplicationZookeeperFactory replicationZookeeperFactory, + ReplicationSourceManagerFactory replicationSourceManagerFactory, + ReplicationSinkFactory replicationSinkFactory) throws IOException{ + this.replicationZookeeperFactory = replicationZookeeperFactory; + this.replicationSourceManagerFactory = replicationSourceManagerFactory; + this.replicationSinkFactory = replicationSinkFactory; initialize(server, fs, logDir, oldLogDir); } @@ -88,13 +106,13 @@ public class Replication implements WALActionsListener, this.replication = isReplication(this.conf); if (replication) { try { - this.zkHelper = new ReplicationZookeeper(server, this.replicating); + this.zkHelper = replicationZookeeperFactory.create(server, this.replicating); } catch (KeeperException ke) { throw new IOException("Failed replication handler create " + "(replicating=" + this.replicating, ke); } - this.replicationManager = new ReplicationSourceManager(zkHelper, conf, - this.server, fs, 
this.replicating, logDir, oldLogDir) ; + this.replicationManager = replicationSourceManagerFactory.create( + conf,zkHelper, this.server, fs, this.replicating, logDir, oldLogDir) ; } else { this.replicationManager = null; this.zkHelper = null; @@ -150,7 +168,7 @@ public class Replication implements WALActionsListener, public void startReplicationService() throws IOException { if (this.replication) { this.replicationManager.init(); - this.replicationSink = new ReplicationSink(this.conf, this.server); + this.replicationSink = replicationSinkFactory.create(); } } diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationFactory.java hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationFactory.java new file mode 100644 index 0000000..84699da --- /dev/null +++ hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationFactory.java @@ -0,0 +1,42 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.replication.regionserver; + +import com.google.inject.assistedinject.Assisted; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.Server; +import org.apache.hadoop.hbase.replication.regionserver.Replication; + +import java.io.IOException; + +/** + * Interface of a Factory that can construct a Replication. + * This will be used by guice in DI. + */ +@InterfaceAudience.Private +public interface ReplicationFactory { + + public Replication create(final Server server, + final FileSystem fs, + @Assisted("logDir") final Path logDir, + @Assisted("oldLogDir") final Path oldLogDir) throws IOException; + +} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationModule.java hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationModule.java new file mode 100644 index 0000000..da6738e --- /dev/null +++ hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationModule.java @@ -0,0 +1,35 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.replication.regionserver; + +import com.google.inject.AbstractModule; +import com.google.inject.assistedinject.FactoryModuleBuilder; + +/** + * + */ +public class ReplicationModule extends AbstractModule { + + @Override + protected void configure() { + install(new FactoryModuleBuilder() + .implement(Replication.class, Replication.class) + .build(ReplicationFactory.class)); + } +} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java index a359f78..3a03535 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java @@ -35,6 +35,7 @@ import org.apache.hadoop.hbase.regionserver.wal.WALEdit; import org.apache.hadoop.hbase.replication.regionserver.metrics.ReplicationSinkMetrics; import org.apache.hadoop.hbase.util.Bytes; +import javax.inject.Inject; import java.io.IOException; import java.util.ArrayList; import java.util.List; @@ -74,12 +75,13 @@ public class ReplicationSink { * @param stopper boolean to tell this thread to stop * @throws IOException thrown when HDFS goes bad or bad file name */ - public ReplicationSink(Configuration conf, Stoppable stopper) + @Inject + public ReplicationSink(Configuration conf, ReplicationSinkMetrics metrics) throws IOException { this.conf = conf; this.pool = new HTablePool(this.conf, conf.getInt("replication.sink.htablepool.capacity", 10)); - this.metrics = new ReplicationSinkMetrics(); + this.metrics = metrics; } /** diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSinkFactory.java hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSinkFactory.java new file mode 100644 index 0000000..ad37245 --- /dev/null +++ 
hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSinkFactory.java @@ -0,0 +1,31 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.replication.regionserver; + +import org.apache.hadoop.classification.InterfaceAudience; + +/** + * Interface of a Factory that can construct a ReplicationSink. + * This will be used by guice in DI. + */ +@InterfaceAudience.Private +public interface ReplicationSinkFactory { + + public ReplicationSink create(); +} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSinkModule.java hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSinkModule.java new file mode 100644 index 0000000..7e6d748 --- /dev/null +++ hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSinkModule.java @@ -0,0 +1,35 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.replication.regionserver; + +import com.google.inject.AbstractModule; +import com.google.inject.assistedinject.FactoryModuleBuilder; + +/** + * + */ +public class ReplicationSinkModule extends AbstractModule { + + @Override + protected void configure() { + install(new FactoryModuleBuilder() + .implement(ReplicationSink.class, ReplicationSink.class) + .build(ReplicationSinkFactory.class)); + } +} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java index ddca9d1..2768fa7 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java @@ -37,6 +37,7 @@ import java.util.concurrent.PriorityBlockingQueue; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; +import com.google.inject.assistedinject.Assisted; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; @@ -57,12 +58,15 @@ import org.apache.hadoop.hbase.regionserver.wal.HLogKey; import org.apache.hadoop.hbase.regionserver.wal.WALEdit; import 
org.apache.hadoop.hbase.replication.ReplicationZookeeper; import org.apache.hadoop.hbase.replication.regionserver.metrics.ReplicationSourceMetrics; +import org.apache.hadoop.hbase.replication.regionserver.metrics.ReplicationSourceMetricsFactory; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.hbase.zookeeper.ZKClusterId; import org.apache.hadoop.ipc.RemoteException; import org.apache.zookeeper.KeeperException; +import javax.inject.Inject; + /** * Class that handles the source of a replication stream. * Currently does not handle more than 1 slave @@ -141,28 +145,19 @@ public class ReplicationSource extends Thread // Metrics for this source private ReplicationSourceMetrics metrics; - /** - * Instantiation method used by region servers - * - * @param conf configuration to use - * @param fs file system to use - * @param manager replication manager to ping to - * @param stopper the atomic boolean to use to stop the regionserver - * @param replicating the atomic boolean that starts/stops replication - * @param peerClusterZnode the name of our znode - * @throws IOException - */ - public void init(final Configuration conf, - final FileSystem fs, - final ReplicationSourceManager manager, - final Stoppable stopper, - final AtomicBoolean replicating, - final String peerClusterZnode) + @Inject + public ReplicationSource(@Assisted final Configuration conf, + @Assisted final FileSystem fs, + @Assisted final ReplicationSourceManager manager, + @Assisted final Stoppable stopper, + @Assisted final AtomicBoolean replicating, + @Assisted final String peerClusterZnode, + final ReplicationSourceMetricsFactory replicationSourceMetricsFactory) throws IOException { this.stopper = stopper; this.conf = conf; this.replicationQueueSizeCapacity = - this.conf.getLong("replication.source.size.capacity", 1024*1024*64); + this.conf.getLong("replication.source.size.capacity", 1024 * 1024 * 64); this.replicationQueueNbCapacity = 
this.conf.getInt("replication.source.nb.capacity", 25000); this.entriesArray = new HLog.Entry[this.replicationQueueNbCapacity]; @@ -186,7 +181,7 @@ public class ReplicationSource extends Thread this.sleepForRetries = this.conf.getLong("replication.source.sleepforretries", 1000); this.fs = fs; - this.metrics = new ReplicationSourceMetrics(peerClusterZnode); + this.metrics = replicationSourceMetricsFactory.create(peerClusterZnode); try { this.clusterId = UUID.fromString(ZKClusterId.readClusterIdZNode(zkHelper diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceFactory.java hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceFactory.java new file mode 100644 index 0000000..1b4100a --- /dev/null +++ hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceFactory.java @@ -0,0 +1,45 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.replication.regionserver; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.hbase.Stoppable; +import org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceInterface; +import org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager; + +import java.util.concurrent.atomic.AtomicBoolean; + +/** + * Interface of a Factory that can construct a ReplicationSourceInterface. + * This will be used by guice in DI. + */ +@InterfaceAudience.Private +public interface ReplicationSourceFactory { + + ReplicationSourceInterface create( + final Configuration conf, + final FileSystem fs, + final ReplicationSourceManager manager, + final Stoppable stopper, + final AtomicBoolean replicating, + final String peerClusterZnode); + +} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceInterface.java hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceInterface.java index ccafe1f..f4a660c 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceInterface.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceInterface.java @@ -35,23 +35,6 @@ import org.apache.hadoop.hbase.Stoppable; public interface ReplicationSourceInterface { /** - * Initializer for the source - * @param conf the configuration to use - * @param fs the file system to use - * @param manager the manager to use - * @param stopper the stopper object for this region server - * @param replicating the status of the replication on this cluster - * @param peerClusterId the id of the peer cluster - * @throws IOException - */ - public void init(final Configuration conf, - final FileSystem fs, - final ReplicationSourceManager manager, - final Stoppable stopper, - 
final AtomicBoolean replicating, - final String peerClusterId) throws IOException; - - /** * Add a log to the list of logs to replicate * @param log path to the log to replicate */ diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java index 4a3ed90..e216d9f 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java @@ -35,6 +35,7 @@ import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; +import com.google.inject.assistedinject.Assisted; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; @@ -49,6 +50,8 @@ import org.apache.zookeeper.KeeperException; import com.google.common.util.concurrent.ThreadFactoryBuilder; +import javax.inject.Inject; + /** * This class is responsible to manage all the replication * sources. 
There are two classes of sources: @@ -90,6 +93,7 @@ public class ReplicationSourceManager { private final long sleepBeforeFailover; // Homemade executer service for replication private final ThreadPoolExecutor executor; + private ReplicationSourceFactory replicationSourceFactory; /** * Creates a replication manager and sets the watch on all the other @@ -102,13 +106,16 @@ public class ReplicationSourceManager { * @param logDir the directory that contains all hlog directories of live RSs * @param oldLogDir the directory where old logs are archived */ - public ReplicationSourceManager(final ReplicationZookeeper zkHelper, - final Configuration conf, - final Stoppable stopper, - final FileSystem fs, - final AtomicBoolean replicating, - final Path logDir, - final Path oldLogDir) { + @Inject + public ReplicationSourceManager(@Assisted final Configuration conf, + @Assisted final ReplicationZookeeper zkHelper, + @Assisted final Stoppable stopper, + @Assisted final FileSystem fs, + @Assisted final AtomicBoolean replicating, + @Assisted("logDir") final Path logDir, + @Assisted("oldLogDir") final Path oldLogDir, + final ReplicationSourceFactory replicationSourceFactory) { + this.replicationSourceFactory = replicationSourceFactory; this.sources = new ArrayList(); this.replicating = replicating; this.zkHelper = zkHelper; @@ -195,13 +202,14 @@ public class ReplicationSourceManager { /** * Add a new normal source to this region server + * * @param id the id of the peer cluster * @return the source that was created * @throws IOException */ public ReplicationSourceInterface addSource(String id) throws IOException { - ReplicationSourceInterface src = - getReplicationSource(this.conf, this.fs, this, stopper, replicating, id); + ReplicationSourceInterface src = this.replicationSourceFactory.create(conf, + fs, this, stopper, replicating, id); synchronized (this.hlogsById) { this.sources.add(src); this.hlogsById.put(id, new TreeSet()); @@ -213,7 +221,7 @@ public class 
ReplicationSourceManager { this.zkHelper.addLogToList(name, src.getPeerClusterZnode()); } catch (KeeperException ke) { String message = "Cannot add log to zk for" + - " replication when creating a new source"; + " replication when creating a new source"; stopper.stop(message); throw new IOException(message, ke); } @@ -294,40 +302,6 @@ public class ReplicationSourceManager { } /** - * Factory method to create a replication source - * @param conf the configuration to use - * @param fs the file system to use - * @param manager the manager to use - * @param stopper the stopper object for this region server - * @param replicating the status of the replication on this cluster - * @param peerId the id of the peer cluster - * @return the created source - * @throws IOException - */ - public ReplicationSourceInterface getReplicationSource( - final Configuration conf, - final FileSystem fs, - final ReplicationSourceManager manager, - final Stoppable stopper, - final AtomicBoolean replicating, - final String peerId) throws IOException { - ReplicationSourceInterface src; - try { - @SuppressWarnings("rawtypes") - Class c = Class.forName(conf.get("replication.replicationsource.implementation", - ReplicationSource.class.getCanonicalName())); - src = (ReplicationSourceInterface) c.newInstance(); - } catch (Exception e) { - LOG.warn("Passed replication source implementation throws errors, " + - "defaulting to ReplicationSource", e); - src = new ReplicationSource(); - - } - src.init(conf, fs, manager, stopper, replicating, peerId); - return src; - } - - /** * Transfer all the queues of the specified to this region server. * First it tries to grab a lock and if it works it will move the * znodes and finally will delete the old znodes. 
@@ -584,8 +558,8 @@ public class ReplicationSourceManager { for (Map.Entry> entry : newQueues.entrySet()) { String peerId = entry.getKey(); try { - ReplicationSourceInterface src = getReplicationSource(conf, - fs, ReplicationSourceManager.this, stopper, replicating, peerId); + ReplicationSourceInterface src = + replicationSourceFactory.create(conf, fs, ReplicationSourceManager.this, stopper, replicating, peerId); if (!zkHelper.getPeerClusters().containsKey(src.getPeerClusterId())) { src.terminate("Recovered queue doesn't belong to any current peer"); break; @@ -595,7 +569,7 @@ public class ReplicationSourceManager { src.enqueueLog(new Path(oldLogDir, hlog)); } src.startup(); - } catch (IOException e) { + } catch (Exception e) { // TODO manage it LOG.error("Failed creating a source", e); } diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManagerFactory.java hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManagerFactory.java new file mode 100644 index 0000000..9bf7c9d --- /dev/null +++ hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManagerFactory.java @@ -0,0 +1,47 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.replication.regionserver; + +import com.google.inject.assistedinject.Assisted; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.Stoppable; +import org.apache.hadoop.hbase.replication.ReplicationZookeeper; +import org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager; + +import java.util.concurrent.atomic.AtomicBoolean; + +/** + * Interface of a Factory that can construct a ReplicationSourceManager. + * This will be used by guice in DI. + */ +@InterfaceAudience.Private +public interface ReplicationSourceManagerFactory { + + public ReplicationSourceManager create(final Configuration conf, + final ReplicationZookeeper zkHelper, + final Stoppable stopper, + final FileSystem fs, + final AtomicBoolean replicating, + @Assisted("logDir") final Path logDir, + @Assisted("oldLogDir") final Path oldLogDir); + +} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManagerModule.java hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManagerModule.java new file mode 100644 index 0000000..804833a --- /dev/null +++ hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManagerModule.java @@ -0,0 +1,35 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.replication.regionserver; + +import com.google.inject.AbstractModule; +import com.google.inject.assistedinject.FactoryModuleBuilder; + +/** + * + */ +public class ReplicationSourceManagerModule extends AbstractModule { + + @Override + protected void configure() { + install(new FactoryModuleBuilder() + .implement(ReplicationSourceManager.class, ReplicationSourceManager.class) + .build(ReplicationSourceManagerFactory.class)); + } +} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceModule.java hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceModule.java new file mode 100644 index 0000000..a3e62fe --- /dev/null +++ hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceModule.java @@ -0,0 +1,35 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.replication.regionserver; + +import com.google.inject.AbstractModule; +import com.google.inject.assistedinject.FactoryModuleBuilder; + +/** + * Guice module that binds ReplicationSourceInterface to ReplicationSource via ReplicationSourceFactory. + */ +public class ReplicationSourceModule extends AbstractModule { + + @Override + protected void configure() { + install(new FactoryModuleBuilder() + .implement(ReplicationSourceInterface.class, ReplicationSource.class) + .build(ReplicationSourceFactory.class)); + } +} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/metrics/ReplicationSinkMetrics.java hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/metrics/ReplicationSinkMetrics.java index 1b57aee..f56593f 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/metrics/ReplicationSinkMetrics.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/metrics/ReplicationSinkMetrics.java @@ -19,7 +19,8 @@ package org.apache.hadoop.hbase.replication.regionserver.metrics; import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hbase.CompatibilitySingletonFactory; + +import javax.inject.Inject; /** * This class is for maintaining the various replication statistics for a sink and publishing them @@ -34,8 +35,9 @@ public class ReplicationSinkMetrics { private ReplicationMetricsSource rms; - public ReplicationSinkMetrics() { - rms = CompatibilitySingletonFactory.getInstance(ReplicationMetricsSource.class); + @Inject + public ReplicationSinkMetrics(ReplicationMetricsSource rms) 
{ + this.rms = rms; + } /** diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/metrics/ReplicationSourceMetrics.java hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/metrics/ReplicationSourceMetrics.java index fe24d39..13db1e3 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/metrics/ReplicationSourceMetrics.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/metrics/ReplicationSourceMetrics.java @@ -18,10 +18,12 @@ package org.apache.hadoop.hbase.replication.regionserver.metrics; +import com.google.inject.assistedinject.Assisted; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hbase.CompatibilitySingletonFactory; + +import javax.inject.Inject; /** * This class is for maintaining the various replication statistics for a source and publishing them @@ -57,7 +59,8 @@ public class ReplicationSourceMetrics { * * @param id Name of the source this class is monitoring */ - public ReplicationSourceMetrics(String id) { + @Inject + public ReplicationSourceMetrics(@Assisted String id, ReplicationMetricsSource rms) { this.id = id; sizeOfLogQueKey = "source." + id + ".sizeOfLogQueue"; @@ -66,7 +69,7 @@ public class ReplicationSourceMetrics { logEditsFilteredKey = "source." + id + ".logEditsFiltered"; shippedBatchesKey = "source." + this.id + ".shippedBatches"; shippedOpsKey = "source." 
+ this.id + ".shippedOps"; - rms = CompatibilitySingletonFactory.getInstance(ReplicationMetricsSource.class); + this.rms = rms; } /** diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/metrics/ReplicationSourceMetricsFactory.java hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/metrics/ReplicationSourceMetricsFactory.java new file mode 100644 index 0000000..dedcd5b --- /dev/null +++ hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/metrics/ReplicationSourceMetricsFactory.java @@ -0,0 +1,30 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.replication.regionserver.metrics; + +import org.apache.hadoop.hbase.replication.regionserver.metrics.ReplicationSourceMetrics; + +/** + * + */ +public interface ReplicationSourceMetricsFactory { + + public ReplicationSourceMetrics create(String peerClusterZnode); + +} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/metrics/ReplicationSourceMetricsModule.java hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/metrics/ReplicationSourceMetricsModule.java new file mode 100644 index 0000000..422a55e --- /dev/null +++ hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/metrics/ReplicationSourceMetricsModule.java @@ -0,0 +1,35 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.replication.regionserver.metrics; + +import com.google.inject.AbstractModule; +import com.google.inject.assistedinject.FactoryModuleBuilder; + +/** + * + */ +public class ReplicationSourceMetricsModule extends AbstractModule { + + @Override + protected void configure() { + install(new FactoryModuleBuilder() + .implement(ReplicationSourceMetrics.class, ReplicationSourceMetrics.class) + .build(ReplicationSourceMetricsFactory.class)); + } +} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java index 4a1dac5..823a548 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java @@ -28,6 +28,7 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.regex.Matcher; import java.util.regex.Pattern; +import com.google.inject.assistedinject.Assisted; import org.apache.commons.lang.NotImplementedException; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -47,6 +48,7 @@ import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import com.google.common.primitives.Ints; +import javax.inject.Inject; /** * Implementation of {@link TableDescriptors} that reads descriptors from the @@ -111,11 +113,13 @@ public class FSTableDescriptors implements TableDescriptors { /** * @param fs * @param rootdir - * @param fsreadOnly True if we are read-only when it comes to filesystem - * operations; i.e. on remove, we do not do delete in fs. + * @param fsreadOnly True if we are read-only when it comes to filesystem operations; i.e. on + * remove, we do not do delete in fs. 
*/ - public FSTableDescriptors(final FileSystem fs, final Path rootdir, - final boolean fsreadOnly) { + @Inject + public FSTableDescriptors(@Assisted final FileSystem fs, + @Assisted final Path rootdir, + @Assisted final boolean fsreadOnly) { super(); this.fs = fs; this.rootdir = rootdir; diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptorsFactory.java hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptorsFactory.java new file mode 100644 index 0000000..8cb06dc --- /dev/null +++ hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptorsFactory.java @@ -0,0 +1,15 @@ +package org.apache.hadoop.hbase.util; + +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; + +/** + * + */ +public interface FSTableDescriptorsFactory { + + public FSTableDescriptors create(final FileSystem fs, + final Path rootdir, + final boolean fsreadOnly); + +} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptorsModule.java hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptorsModule.java new file mode 100644 index 0000000..35c8bf2 --- /dev/null +++ hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptorsModule.java @@ -0,0 +1,17 @@ +package org.apache.hadoop.hbase.util; + +import com.google.inject.AbstractModule; +import com.google.inject.assistedinject.FactoryModuleBuilder; + +/** + * + */ +public class FSTableDescriptorsModule extends AbstractModule { + + @Override + protected void configure() { + install(new FactoryModuleBuilder() + .implement(FSTableDescriptors.class, FSTableDescriptors.class) + .build(FSTableDescriptorsFactory.class)); + } +} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java hbase-server/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java index 598e851..0884816 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java +++ 
hbase-server/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java @@ -20,7 +20,6 @@ package org.apache.hadoop.hbase.util; import java.io.IOException; -import java.lang.reflect.InvocationTargetException; import java.util.List; import org.apache.commons.logging.Log; @@ -29,9 +28,13 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.hbase.master.HMaster; +import org.apache.hadoop.hbase.master.HMasterFactory; import org.apache.hadoop.hbase.regionserver.HRegionServer; +import org.apache.hadoop.hbase.regionserver.HRegionServerFactory; import org.apache.hadoop.hbase.regionserver.ShutdownHook; +import javax.inject.Provider; + /** * Utility used running a cluster all in the one JVM. */ @@ -71,29 +74,16 @@ public class JVMClusterUtil { /** * Creates a {@link RegionServerThread}. * Call 'start' on the returned thread to make it run. - * @param c Configuration to use. - * @param hrsc Class to create. * @param index Used distinguishing the object returned. * @throws IOException * @return Region server added. */ public static JVMClusterUtil.RegionServerThread createRegionServerThread( - final Configuration c, final Class hrsc, + final HRegionServerFactory hrsf, + final Configuration conf, final int index) throws IOException { - HRegionServer server; - try { - server = hrsc.getConstructor(Configuration.class).newInstance(c); - } catch (InvocationTargetException ite) { - Throwable target = ite.getTargetException(); - throw new RuntimeException("Failed construction of RegionServer: " + - hrsc.toString() + ((target.getCause() != null)? 
- target.getCause().getMessage(): ""), target); - } catch (Exception e) { - IOException ioe = new IOException(); - ioe.initCause(e); - throw ioe; - } + HRegionServer server = hrsf.create(new Configuration(conf)); return new JVMClusterUtil.RegionServerThread(server, index); } @@ -118,29 +108,16 @@ public class JVMClusterUtil { /** * Creates a {@link MasterThread}. * Call 'start' on the returned thread to make it run. - * @param c Configuration to use. - * @param hmc Class to create. * @param index Used distinguishing the object returned. * @throws IOException * @return Master added. */ public static JVMClusterUtil.MasterThread createMasterThread( - final Configuration c, final Class hmc, + final HMasterFactory hmf, + final Configuration conf, final int index) throws IOException { - HMaster server; - try { - server = hmc.getConstructor(Configuration.class).newInstance(c); - } catch (InvocationTargetException ite) { - Throwable target = ite.getTargetException(); - throw new RuntimeException("Failed construction of Master: " + - hmc.toString() + ((target.getCause() != null)? 
- target.getCause().getMessage(): ""), target); - } catch (Exception e) { - IOException ioe = new IOException(); - ioe.initCause(e); - throw ioe; - } + HMaster server = hmf.create(conf); return new JVMClusterUtil.MasterThread(server, index); } diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/util/Sleeper.java hbase-server/src/main/java/org/apache/hadoop/hbase/util/Sleeper.java index c8b657a..4624a27 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/util/Sleeper.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/util/Sleeper.java @@ -19,12 +19,15 @@ */ package org.apache.hadoop.hbase.util; +import com.google.inject.assistedinject.Assisted; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.hbase.Stoppable; +import javax.inject.Inject; + /** * Sleeper for current thread. * Sleeps for passed period. Also checks passed boolean and if interrupted, @@ -47,7 +50,8 @@ public class Sleeper { * @param stopper When {@link Stoppable#isStopped()} is true, this thread will * cleanup and exit cleanly. */ - public Sleeper(final int sleep, final Stoppable stopper) { + @Inject + public Sleeper(@Assisted final int sleep, @Assisted final Stoppable stopper) { this.period = sleep; this.stopper = stopper; } diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/util/SleeperFactory.java hbase-server/src/main/java/org/apache/hadoop/hbase/util/SleeperFactory.java new file mode 100644 index 0000000..39d9b99 --- /dev/null +++ hbase-server/src/main/java/org/apache/hadoop/hbase/util/SleeperFactory.java @@ -0,0 +1,30 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.util; + +import org.apache.hadoop.hbase.Stoppable; +import org.apache.hadoop.hbase.util.Sleeper; + +/** + * + */ +public interface SleeperFactory { + + public Sleeper create(final int msgInterval, final Stoppable stoppable); +} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/util/SleeperModule.java hbase-server/src/main/java/org/apache/hadoop/hbase/util/SleeperModule.java new file mode 100644 index 0000000..59aae40 --- /dev/null +++ hbase-server/src/main/java/org/apache/hadoop/hbase/util/SleeperModule.java @@ -0,0 +1,35 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.util; + +import com.google.inject.AbstractModule; +import com.google.inject.assistedinject.FactoryModuleBuilder; + +/** + * + */ +public class SleeperModule extends AbstractModule { + + @Override + protected void configure() { + install(new FactoryModuleBuilder() + .implement(Sleeper.class, Sleeper.class) + .build(SleeperFactory.class)); + } +} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ClusterStatusTracker.java hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ClusterStatusTracker.java index 230def2..5db80be 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ClusterStatusTracker.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ClusterStatusTracker.java @@ -19,6 +19,7 @@ */ package org.apache.hadoop.hbase.zookeeper; +import com.google.inject.assistedinject.Assisted; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; @@ -28,6 +29,8 @@ import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; import org.apache.zookeeper.KeeperException; +import javax.inject.Inject; + /** * Tracker on cluster settings up in zookeeper. * This is not related to {@link ClusterStatus}. 
That class is a data structure @@ -47,7 +50,8 @@ public class ClusterStatusTracker extends ZooKeeperNodeTracker { * @param watcher * @param abortable */ - public ClusterStatusTracker(ZooKeeperWatcher watcher, Abortable abortable) { + @Inject + public ClusterStatusTracker(@Assisted ZooKeeperWatcher watcher, @Assisted Abortable abortable) { super(watcher, watcher.clusterStateZNode, abortable); } diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ClusterStatusTrackerFactory.java hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ClusterStatusTrackerFactory.java new file mode 100644 index 0000000..7b34b3e --- /dev/null +++ hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ClusterStatusTrackerFactory.java @@ -0,0 +1,30 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.zookeeper; + +import org.apache.hadoop.hbase.Abortable; + +/** + * + */ +public interface ClusterStatusTrackerFactory { + + public ClusterStatusTracker create(ZooKeeperWatcher watcher, Abortable abortable); + +} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ClusterStatusTrackerModule.java hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ClusterStatusTrackerModule.java new file mode 100644 index 0000000..7188a1a --- /dev/null +++ hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ClusterStatusTrackerModule.java @@ -0,0 +1,35 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.zookeeper; + +import com.google.inject.AbstractModule; +import com.google.inject.assistedinject.FactoryModuleBuilder; + +/** + * + */ +public class ClusterStatusTrackerModule extends AbstractModule { + + @Override + protected void configure() { + install(new FactoryModuleBuilder() + .implement(ClusterStatusTracker.class, ClusterStatusTracker.class) + .build(ClusterStatusTrackerFactory.class)); + } +} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/DrainingServerTracker.java hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/DrainingServerTracker.java index 714fd0c..90d4e89 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/DrainingServerTracker.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/DrainingServerTracker.java @@ -22,6 +22,7 @@ import java.util.List; import java.util.NavigableSet; import java.util.TreeSet; +import com.google.inject.assistedinject.Assisted; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; @@ -30,6 +31,8 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.master.ServerManager; import org.apache.zookeeper.KeeperException; +import javax.inject.Inject; + /** * Tracks the list of draining region servers via ZK. 
* @@ -52,8 +55,10 @@ public class DrainingServerTracker extends ZooKeeperListener { private NavigableSet drainingServers = new TreeSet(); private Abortable abortable; - public DrainingServerTracker(ZooKeeperWatcher watcher, - Abortable abortable, ServerManager serverManager) { + @Inject + public DrainingServerTracker(@Assisted ZooKeeperWatcher watcher, + @Assisted Abortable abortable, + @Assisted ServerManager serverManager) { super(watcher); this.abortable = abortable; this.serverManager = serverManager; diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/DrainingServerTrackerFactory.java hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/DrainingServerTrackerFactory.java new file mode 100644 index 0000000..7bf2ad9 --- /dev/null +++ hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/DrainingServerTrackerFactory.java @@ -0,0 +1,33 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.zookeeper; + +import org.apache.hadoop.hbase.Abortable; +import org.apache.hadoop.hbase.master.ServerManager; + +/** + * + */ +public interface DrainingServerTrackerFactory { + + public DrainingServerTracker create(ZooKeeperWatcher watcher, + Abortable abortable, + ServerManager serverManager); + +} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/DrainingServerTrackerModule.java hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/DrainingServerTrackerModule.java new file mode 100644 index 0000000..736c4a5 --- /dev/null +++ hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/DrainingServerTrackerModule.java @@ -0,0 +1,35 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.zookeeper; + +import com.google.inject.AbstractModule; +import com.google.inject.assistedinject.FactoryModuleBuilder; + +/** + * + */ +public class DrainingServerTrackerModule extends AbstractModule { + + @Override + protected void configure() { + install(new FactoryModuleBuilder() + .implement(DrainingServerTracker.class, DrainingServerTracker.class) + .build(DrainingServerTrackerFactory.class)); + } +} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/MasterAddressTracker.java hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/MasterAddressTracker.java index 6d61099..4c3fc6c 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/MasterAddressTracker.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/MasterAddressTracker.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.zookeeper; import java.io.IOException; +import com.google.inject.assistedinject.Assisted; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.DeserializationException; @@ -29,6 +30,8 @@ import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; import org.apache.zookeeper.KeeperException; import org.apache.zookeeper.data.Stat; +import javax.inject.Inject; + /** * Manages the location of the current active Master for the RegionServer. *

@@ -60,7 +63,8 @@ public class MasterAddressTracker extends ZooKeeperNodeTracker { * @param watcher zk reference and watcher * @param abortable abortable in case of fatal error */ - public MasterAddressTracker(ZooKeeperWatcher watcher, Abortable abortable) { + @Inject + public MasterAddressTracker(@Assisted ZooKeeperWatcher watcher, @Assisted Abortable abortable) { super(watcher, watcher.getMasterAddressZNode(), abortable); } diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/MasterAddressTrackerFactory.java hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/MasterAddressTrackerFactory.java new file mode 100644 index 0000000..db19c08 --- /dev/null +++ hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/MasterAddressTrackerFactory.java @@ -0,0 +1,30 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.zookeeper; + +import org.apache.hadoop.hbase.Abortable; + +/** + * + */ +public interface MasterAddressTrackerFactory { + + public MasterAddressTracker create(ZooKeeperWatcher watcher, Abortable abortable); + +} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/MasterAddressTrackerModule.java hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/MasterAddressTrackerModule.java new file mode 100644 index 0000000..3b07214 --- /dev/null +++ hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/MasterAddressTrackerModule.java @@ -0,0 +1,35 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.zookeeper; + +import com.google.inject.AbstractModule; +import com.google.inject.assistedinject.FactoryModuleBuilder; + +/** + * + */ +public class MasterAddressTrackerModule extends AbstractModule { + + @Override + protected void configure() { + install(new FactoryModuleBuilder() + .implement(MasterAddressTracker.class, MasterAddressTracker.class) + .build(MasterAddressTrackerFactory.class)); + } +} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/RegionServerTracker.java hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/RegionServerTracker.java index d95ff14..52b9146 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/RegionServerTracker.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/RegionServerTracker.java @@ -25,6 +25,7 @@ import java.util.List; import java.util.NavigableSet; import java.util.TreeSet; +import com.google.inject.assistedinject.Assisted; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; @@ -33,6 +34,8 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.master.ServerManager; import org.apache.zookeeper.KeeperException; +import javax.inject.Inject; + /** * Tracks the online region servers via ZK. 
* @@ -50,8 +53,10 @@ public class RegionServerTracker extends ZooKeeperListener { private ServerManager serverManager; private Abortable abortable; - public RegionServerTracker(ZooKeeperWatcher watcher, - Abortable abortable, ServerManager serverManager) { + @Inject + public RegionServerTracker(@Assisted ZooKeeperWatcher watcher, + @Assisted Abortable abortable, + @Assisted ServerManager serverManager) { super(watcher); this.abortable = abortable; this.serverManager = serverManager; diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/RegionServerTrackerFactory.java hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/RegionServerTrackerFactory.java new file mode 100644 index 0000000..d34e56d --- /dev/null +++ hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/RegionServerTrackerFactory.java @@ -0,0 +1,33 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.zookeeper; + +import org.apache.hadoop.hbase.Abortable; +import org.apache.hadoop.hbase.master.ServerManager; + +/** + * + */ +public interface RegionServerTrackerFactory { + + public RegionServerTracker create(ZooKeeperWatcher watcher, + Abortable abortable, + ServerManager serverManager); + +} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/RegionServerTrackerModule.java hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/RegionServerTrackerModule.java new file mode 100644 index 0000000..606fdf2 --- /dev/null +++ hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/RegionServerTrackerModule.java @@ -0,0 +1,35 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.zookeeper; + +import com.google.inject.AbstractModule; +import com.google.inject.assistedinject.FactoryModuleBuilder; + +/** + * + */ +public class RegionServerTrackerModule extends AbstractModule { + + @Override + protected void configure() { + install(new FactoryModuleBuilder() + .implement(RegionServerTracker.class, RegionServerTracker.class) + .build(RegionServerTrackerFactory.class)); + } +} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java index 33bc1d0..5714b10 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java @@ -26,6 +26,7 @@ import java.util.List; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.CountDownLatch; +import com.google.inject.assistedinject.Assisted; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; @@ -41,6 +42,8 @@ import org.apache.zookeeper.Watcher; import org.apache.zookeeper.ZooDefs; import org.apache.zookeeper.data.ACL; +import javax.inject.Inject; + /** * Acts as the single ZooKeeper Watcher. One instance of this is instantiated * for each Master, RegionServer, and client process. @@ -132,8 +135,9 @@ public class ZooKeeperWatcher implements Watcher, Abortable, Closeable { * @throws IOException * @throws ZooKeeperConnectionException */ - public ZooKeeperWatcher(Configuration conf, String descriptor, - Abortable abortable, boolean canCreateBaseZNode) + @Inject + public ZooKeeperWatcher(@Assisted Configuration conf, @Assisted String descriptor, + @Assisted Abortable abortable, @Assisted boolean canCreateBaseZNode) throws IOException, ZooKeeperConnectionException { this.conf = conf; // Capture a stack trace now. 
Will print it out later if problem so we can diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcherFactory.java hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcherFactory.java new file mode 100644 index 0000000..1689544 --- /dev/null +++ hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcherFactory.java @@ -0,0 +1,34 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.zookeeper; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.Abortable; + +/** + * + */ +public interface ZooKeeperWatcherFactory { + + public ZooKeeperWatcher create(Configuration conf, + String descriptor, + Abortable abortable, + boolean canCreateBaseZNode); + +} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcherModule.java hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcherModule.java new file mode 100644 index 0000000..dd26490 --- /dev/null +++ hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcherModule.java @@ -0,0 +1,35 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.zookeeper; + +import com.google.inject.AbstractModule; +import com.google.inject.assistedinject.FactoryModuleBuilder; + +/** + * + */ +public class ZooKeeperWatcherModule extends AbstractModule { + + @Override + protected void configure() { + install(new FactoryModuleBuilder() + .implement(ZooKeeperWatcher.class, ZooKeeperWatcher.class) + .build(ZooKeeperWatcherFactory.class)); + } +} diff --git hbase-server/src/main/resources/hbase-default.xml hbase-server/src/main/resources/hbase-default.xml index a42d94b..af33909 100644 --- hbase-server/src/main/resources/hbase-default.xml +++ hbase-server/src/main/resources/hbase-default.xml @@ -895,4 +895,9 @@ default log cleaners in the list as they will be overwritten in hbase-site.xml. + + + hbase.replication + true + diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseGuiceTestUtil.java hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseGuiceTestUtil.java new file mode 100644 index 0000000..552ce49 --- /dev/null +++ hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseGuiceTestUtil.java @@ -0,0 +1,56 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase; + +import com.google.inject.Guice; +import com.google.inject.Injector; +import com.google.inject.Module; + +import java.util.Collection; +import java.util.Map; + +/** + * + */ +public class HBaseGuiceTestUtil { + public static Injector createDefaultInjector() { + Collection modules = makeDefaultModules().values(); + + return Guice.createInjector(modules); + } + + public static Injector createMiniHBaseInjector() { + + Collection modules = makeMiniHBaseTestModules().values(); + return Guice.createInjector(modules); + + } + + public static Map makeMiniHBaseTestModules() { + Map modules = HBaseGuice.makeLocalHBaseModules(); + modules.put("MiniHBaseCluster", new MiniHBaseClusterModule()); + modules.put("HRegionServer", new MiniHBaseClusterRegionServerModule()); + return modules; + } + + public static Map makeDefaultModules() { + return HBaseGuice.makeDefaultModules(); + } + +} diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java index 767202e..6d5ec2b 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java @@ -44,6 +44,7 @@ import java.util.Random; import java.util.Set; import java.util.UUID; +import com.google.inject.Injector; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.impl.Jdk14Logger; @@ -187,6 +188,8 @@ public class HBaseTestingUtility { Compression.Algorithm.NONE, Compression.Algorithm.GZ }; + private Injector injector; + /** * Create all combinations of Bloom filters and compression algorithms for * testing. 
@@ -211,6 +214,7 @@ public class HBaseTestingUtility { public HBaseTestingUtility(Configuration conf) { this.conf = conf; + this.injector = HBaseGuiceTestUtil.createMiniHBaseInjector(); // a hbase checksum verification failure will cause unit tests to fail ChecksumUtil.generateExceptionForChecksumFailureForTest(true); @@ -231,6 +235,14 @@ public class HBaseTestingUtility { return this.conf; } + public Injector getInjector() { + return injector; + } + + public void setInjector(Injector injector) { + this.injector = injector; + } + /** * @return Where to write test data on local filesystem; usually * {@link #DEFAULT_BASE_TEST_DIRECTORY} @@ -648,7 +660,7 @@ public class HBaseTestingUtility { * @throws InterruptedException * @see {@link #startMiniCluster()} */ - public MiniHBaseCluster startMiniHBaseCluster(final int numMasters, + public MiniHBaseCluster startMiniHBaseCluster( final int numMasters, final int numSlaves) throws IOException, InterruptedException { // Now do the mini hbase cluster. Set the hbase.rootdir in config. @@ -660,7 +672,7 @@ public class HBaseTestingUtility { conf.setInt("hbase.master.wait.on.regionservers.maxtostart", numSlaves); Configuration c = new Configuration(this.conf); - this.hbaseCluster = new MiniHBaseCluster(c, numMasters, numSlaves); + this.hbaseCluster = injector.getInstance(MiniHBaseClusterFactory.class).create(conf, numMasters, numSlaves); // Don't leave here till we've done a successful scan of the .META. HTable t = new HTable(c, HConstants.META_TABLE_NAME); ResultScanner s = t.getScanner(new Scan()); @@ -682,7 +694,7 @@ public class HBaseTestingUtility { * @throws IOException */ public void restartHBaseCluster(int servers) throws IOException, InterruptedException { - this.hbaseCluster = new MiniHBaseCluster(this.conf, servers); + this.hbaseCluster = injector.getInstance(MiniHBaseClusterFactory.class).create(conf, 1, servers); // Don't leave here till we've done a successful scan of the .META. 
HTable t = new HTable(new Configuration(this.conf), HConstants.META_TABLE_NAME); ResultScanner s = t.getScanner(new Scan()); diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java hbase-server/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java index e574303..85bcf7f 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java @@ -24,6 +24,7 @@ import java.security.PrivilegedAction; import java.util.ArrayList; import java.util.List; +import com.google.inject.assistedinject.Assisted; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; @@ -31,17 +32,31 @@ import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.hbase.client.HConnectionManager; +import org.apache.hadoop.hbase.regionserver.CompactSplitThreadFactory; +import org.apache.hadoop.hbase.regionserver.CompactionCheckerFactory; +import org.apache.hadoop.hbase.regionserver.HRegionThriftServerFactory; +import org.apache.hadoop.hbase.regionserver.LeasesFactory; +import org.apache.hadoop.hbase.regionserver.MemStoreFlusherFactory; +import org.apache.hadoop.hbase.replication.regionserver.ReplicationFactory; +import org.apache.hadoop.hbase.ipc.RpcServerFactory; +import org.apache.hadoop.hbase.util.SleeperFactory; +import org.apache.hadoop.hbase.catalog.CatalogTrackerFactory; +import org.apache.hadoop.hbase.zookeeper.ClusterStatusTrackerFactory; +import org.apache.hadoop.hbase.zookeeper.MasterAddressTrackerFactory; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcherFactory; import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse; import org.apache.hadoop.hbase.regionserver.HRegion; 
import org.apache.hadoop.hbase.regionserver.HRegionServer; +import org.apache.hadoop.hbase.regionserver.RegionServerAccounting; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread; import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread; import org.apache.hadoop.hbase.util.JVMClusterUtil; import org.apache.hadoop.hbase.util.Threads; -import org.apache.hadoop.io.MapWritable; + +import javax.inject.Inject; /** * This class creates a single process HBase cluster. @@ -54,31 +69,25 @@ import org.apache.hadoop.io.MapWritable; public class MiniHBaseCluster { static final Log LOG = LogFactory.getLog(MiniHBaseCluster.class.getName()); private Configuration conf; + private LocalHBaseClusterFactory localHBaseClusterFactory; public LocalHBaseCluster hbaseCluster; private static int index; /** * Start a MiniHBaseCluster. * @param conf Configuration to be used for cluster - * @param numRegionServers initial number of region servers to start. - * @throws IOException - */ - public MiniHBaseCluster(Configuration conf, int numRegionServers) - throws IOException, InterruptedException { - this(conf, 1, numRegionServers); - } - - /** - * Start a MiniHBaseCluster. - * @param conf Configuration to be used for cluster * @param numMasters initial number of masters to start. * @param numRegionServers initial number of region servers to start. 
* @throws IOException */ - public MiniHBaseCluster(Configuration conf, int numMasters, - int numRegionServers) + @Inject + public MiniHBaseCluster(@Assisted Configuration conf, + @Assisted("numMasters") int numMasters, + @Assisted("numRegionServers") int numRegionServers, + LocalHBaseClusterFactory localHBaseClusterFactory) throws IOException, InterruptedException { this.conf = conf; + this.localHBaseClusterFactory = localHBaseClusterFactory; conf.set(HConstants.MASTER_PORT, "0"); init(numMasters, numRegionServers); } @@ -98,19 +107,54 @@ public class MiniHBaseCluster { private User user = null; public static boolean TEST_SKIP_CLOSE = false; - public MiniHBaseClusterRegionServer(Configuration conf) + + /** + * Starts a HRegionServer at the default location + * + * @param conf + * @throws java.io.IOException + * @throws InterruptedException + */ + @Inject + public MiniHBaseClusterRegionServer(@Assisted Configuration conf, + SleeperFactory sleeperFactory, + RpcServerFactory rpcServerFactory, + ReplicationFactory replicationFactory, + RegionServerAccounting rsAccounting, + ZooKeeperWatcherFactory zooKeeperWatcherFactory, + MasterAddressTrackerFactory masterAddressTrackerFactory, + ClusterStatusTrackerFactory clusterStatusTrackerFactory, + CatalogTrackerFactory catalogTrackerFactory, + MemStoreFlusherFactory memStoreFlusherFactory, + CompactSplitThreadFactory compactSplitThreadFactory, + CompactionCheckerFactory compactionCheckerFactory, + LeasesFactory leasesFactory, + HRegionThriftServerFactory hRegionThriftServerFactory) throws IOException, InterruptedException { - super(conf); - this.user = User.getCurrent(); + super(conf, + sleeperFactory, + rpcServerFactory, + replicationFactory, + rsAccounting, + zooKeeperWatcherFactory, + masterAddressTrackerFactory, + clusterStatusTrackerFactory, + catalogTrackerFactory, + memStoreFlusherFactory, + compactSplitThreadFactory, + compactionCheckerFactory, + leasesFactory, + hRegionThriftServerFactory); + user = User.getCurrent(); 
} /* - * @param c - * @param currentfs We return this if we did not make a new one. - * @param uniqueName Same name used to help identify the created fs. - * @return A new fs instance if we are up on DistributeFileSystem. - * @throws IOException - */ + * @param c + * @param currentfs We return this if we did not make a new one. + * @param uniqueName Same name used to help identify the created fs. + * @return A new fs instance if we are up on DistributeFileSystem. + * @throws IOException + */ @Override protected void handleReportForDutyResponse( @@ -190,15 +234,14 @@ public class MiniHBaseCluster { throws IOException, InterruptedException { try { // start up a LocalHBaseCluster - hbaseCluster = new LocalHBaseCluster(conf, nMasterNodes, 0, - HMaster.class, MiniHBaseCluster.MiniHBaseClusterRegionServer.class); + hbaseCluster = localHBaseClusterFactory.create(conf, nMasterNodes,0); // manually add the regionservers as other users for (int i=0; i modules = HBaseGuiceTestUtil.makeMiniHBaseTestModules(); + modules.remove("HMaster"); + modules.remove("HRegionServer"); + modules.put("TestLocalHBaseCluster", new TestLocalHBaseClusterModule()); + + Injector injector = Guice.createInjector(modules.values()); + LocalHBaseCluster cluster = injector.getInstance(LocalHBaseClusterFactory.class).create(conf,1,1); + // Can we cast back to our master class? 
try { ((MyHMaster)cluster.getMaster(0)).setZKCluster(zkCluster); @@ -89,9 +134,46 @@ public class TestLocalHBaseCluster { public static class MyHMaster extends HMaster { private MiniZooKeeperCluster zkcluster = null; - public MyHMaster(Configuration conf) throws IOException, KeeperException, - InterruptedException { - super(conf); + @Inject + public MyHMaster(@Assisted final Configuration conf, + final RpcServerFactory rpcServerFactory, + final SleeperFactory sleeperFactory, + final ZooKeeperWatcherFactory zooKeeperWatcherFactory, + final MasterMetricsFactory masterMetricsFactory, + final ClusterStatusTrackerFactory clusterStatusTrackerFactory, + final ActiveMasterManagerFactory activeMasterManagerFactory, + final CatalogTrackerFactory catalogTrackerFactory, + final AssignmentManagerFactory assignmentManagerFactory, + final RegionServerTrackerFactory regionServerTrackerFactory, + final DrainingServerTrackerFactory drainingServerTrackerFactory, + final LoadBalancerFactory loadBalancerFactory, + final MasterFileSystemFactory masterFileSystemFactory, + final FSTableDescriptorsFactory fsTableDescriptorsFactory, + final ExecutorServiceFactory executorServiceFactory, + final ServerManagerFactory serverManagerFactory, + final MasterCoprocessorHostFactory masterCoprocessorHostFactory, + final CatalogJanitorFactory catalogJanitorFactory, + final MBeanSource mBeanSource) + throws IOException, KeeperException, InterruptedException { + super(conf, + rpcServerFactory, + sleeperFactory, + zooKeeperWatcherFactory, + masterMetricsFactory, + clusterStatusTrackerFactory, + activeMasterManagerFactory, + catalogTrackerFactory, + assignmentManagerFactory, + regionServerTrackerFactory, + drainingServerTrackerFactory, + loadBalancerFactory, + masterFileSystemFactory, + fsTableDescriptorsFactory, + executorServiceFactory, + serverManagerFactory, + masterCoprocessorHostFactory, + catalogJanitorFactory, + mBeanSource); } @Override @@ -116,13 +198,62 @@ public class TestLocalHBaseCluster { 
*/ public static class MyHRegionServer extends HRegionServer { - public MyHRegionServer(Configuration conf) throws IOException, - InterruptedException { - super(conf); + /** + * Starts a HRegionServer at the default location + * + * @param conf + * @throws java.io.IOException + * @throws InterruptedException + */ + @Inject + public MyHRegionServer(@Assisted Configuration conf, + SleeperFactory sleeperFactory, + RpcServerFactory rpcServerFactory, + ReplicationFactory replicationFactory, + RegionServerAccounting rsAccounting, + ZooKeeperWatcherFactory zooKeeperWatcherFactory, + MasterAddressTrackerFactory masterAddressTrackerFactory, + ClusterStatusTrackerFactory clusterStatusTrackerFactory, + CatalogTrackerFactory catalogTrackerFactory, + MemStoreFlusherFactory memStoreFlusherFactory, + CompactSplitThreadFactory compactSplitThreadFactory, + CompactionCheckerFactory compactionCheckerFactory, + LeasesFactory leasesFactory, + HRegionThriftServerFactory hRegionThriftServerFactory) + throws IOException, InterruptedException { + super(conf, + sleeperFactory, + rpcServerFactory, + replicationFactory, + rsAccounting, + zooKeeperWatcherFactory, + masterAddressTrackerFactory, + clusterStatusTrackerFactory, + catalogTrackerFactory, + memStoreFlusherFactory, + compactSplitThreadFactory, + compactionCheckerFactory, + leasesFactory, + hRegionThriftServerFactory); } public int echo(int val) { return val; } } + + public static class TestLocalHBaseClusterModule extends AbstractModule { + + @Override + protected void configure() { + + install(new FactoryModuleBuilder() + .implement(HMaster.class, MyHMaster.class) + .build(HMasterFactory.class)); + + install(new FactoryModuleBuilder() + .implement(HRegionServer.class, MyHRegionServer.class) + .build(HRegionServerFactory.class)); + } + } } diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java 
hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java index 6dabc27..ce2e058 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java @@ -25,6 +25,8 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.*; +import org.apache.hadoop.hbase.HBaseGuiceTestUtil; +import org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManagerFactory; import org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager; import org.junit.BeforeClass; import org.junit.Test; @@ -53,9 +55,7 @@ public class TestReplicationAdmin { private static ReplicationAdmin admin; private static AtomicBoolean replicating = new AtomicBoolean(true); - /** - * @throws java.lang.Exception - */ + /** @throws java.lang.Exception */ @BeforeClass public static void setUpBeforeClass() throws Exception { TEST_UTIL.startMiniZKCluster(); @@ -66,15 +66,18 @@ public class TestReplicationAdmin { HConstants.HREGION_OLDLOGDIR_NAME); Path logDir = new Path(TEST_UTIL.getDataTestDir(), HConstants.HREGION_LOGDIR_NAME); - manager = new ReplicationSourceManager(admin.getReplicationZk(), conf, - // The following stopper never stops so that we can respond - // to zk notification - new Stoppable() { - @Override - public void stop(String why) {} - @Override - public boolean isStopped() {return false;} - }, FileSystem.get(conf), replicating, logDir, oldLogDir); + manager = + HBaseGuiceTestUtil.createDefaultInector().getInstance(ReplicationSourceManagerFactory.class) + .create(conf, admin.getReplicationZk(), + new Stoppable() { + @Override + public void stop(String why) { } + + @Override + public boolean isStopped() { + return false; + } + }, FileSystem.get(conf), replicating, logDir, oldLogDir); } /** diff --git 
hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManager.java hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManager.java index c793c07..b595acb 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManager.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManager.java @@ -30,8 +30,10 @@ import java.util.List; import java.util.Map; import java.util.concurrent.atomic.AtomicBoolean; +import com.google.inject.Injector; import org.apache.hadoop.hbase.DeserializationException; import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.HBaseGuiceTestUtil; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; @@ -52,7 +54,7 @@ import org.apache.hadoop.hbase.executor.ExecutorService; import org.apache.hadoop.hbase.executor.ExecutorService.ExecutorType; import org.apache.hadoop.hbase.master.RegionState.State; import org.apache.hadoop.hbase.master.balancer.DefaultLoadBalancer; -import org.apache.hadoop.hbase.master.balancer.LoadBalancerFactory; +import org.apache.hadoop.hbase.master.balancer.LoadBalancerFactoryImpl; import org.apache.hadoop.hbase.master.handler.ServerShutdownHandler; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetRequest; @@ -331,8 +333,8 @@ public class TestAssignmentManager { // We need a mocked catalog tracker. CatalogTracker ct = Mockito.mock(CatalogTracker.class); - LoadBalancer balancer = LoadBalancerFactory.getLoadBalancer(server - .getConfiguration()); + LoadBalancer balancer = new LoadBalancerFactoryImpl(server.getConfiguration(), + HBaseGuiceTestUtil.createDefaultInector()).create(); // Create an AM. 
AssignmentManager am = new AssignmentManager(this.server, this.serverManager, ct, balancer, executor, null); @@ -398,8 +400,8 @@ public class TestAssignmentManager { // We need a mocked catalog tracker. CatalogTracker ct = Mockito.mock(CatalogTracker.class); - LoadBalancer balancer = LoadBalancerFactory.getLoadBalancer(server - .getConfiguration()); + LoadBalancer balancer = new LoadBalancerFactoryImpl(server.getConfiguration(), + HBaseGuiceTestUtil.createDefaultInector()).create();; // Create an AM. AssignmentManager am = new AssignmentManager(this.server, this.serverManager, ct, balancer, executor, null); @@ -499,7 +501,8 @@ public class TestAssignmentManager { ExecutorService executor = startupMasterExecutor("testSSHWhenDisableTableInProgress"); // We need a mocked catalog tracker. CatalogTracker ct = Mockito.mock(CatalogTracker.class); - LoadBalancer balancer = LoadBalancerFactory.getLoadBalancer(server.getConfiguration()); + LoadBalancer balancer = new LoadBalancerFactoryImpl(server.getConfiguration(), + HBaseGuiceTestUtil.createDefaultInector()).create(); // Create an AM. AssignmentManager am = new AssignmentManager(this.server, this.serverManager, ct, balancer, executor, null); @@ -617,8 +620,8 @@ public class TestAssignmentManager { Mockito.when(this.serverManager.sendRegionClose(SERVERNAME_A, hri, -1)).thenReturn(true); // Need a mocked catalog tracker. CatalogTracker ct = Mockito.mock(CatalogTracker.class); - LoadBalancer balancer = LoadBalancerFactory.getLoadBalancer(server - .getConfiguration()); + LoadBalancer balancer = new LoadBalancerFactoryImpl(server.getConfiguration(), + HBaseGuiceTestUtil.createDefaultInector()).create(); // Create an AM. 
AssignmentManager am = new AssignmentManager(this.server, this.serverManager, ct, balancer, null, null); @@ -823,7 +826,8 @@ public class TestAssignmentManager { destServers.add(SERVERNAME_A); Mockito.when(this.serverManager.createDestinationServersList()).thenReturn(destServers); // To avoid cast exception in DisableTableHandler process. - Server server = new HMaster(HTU.getConfiguration()); + Injector injector = HBaseGuiceTestUtil.createDefaultInector(); + Server server = injector.getInstance(HMaster.class); AssignmentManagerWithExtrasForTesting am = setUpMockedAssignmentManager(server, this.serverManager); AtomicBoolean gate = new AtomicBoolean(false); @@ -937,7 +941,8 @@ public class TestAssignmentManager { Mockito.when(ct.getConnection()).thenReturn(connection); // Create and startup an executor. Used by AM handling zk callbacks. ExecutorService executor = startupMasterExecutor("mockedAMExecutor"); - this.balancer = LoadBalancerFactory.getLoadBalancer(server.getConfiguration()); + this.balancer = new LoadBalancerFactoryImpl(server.getConfiguration(), + HBaseGuiceTestUtil.createDefaultInector()).create(); AssignmentManagerWithExtrasForTesting am = new AssignmentManagerWithExtrasForTesting( server, manager, ct, this.balancer, executor); return am; diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java index a54b1cf..5aaa7b8 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java @@ -176,7 +176,7 @@ public class TestCatalogJanitor { private final AssignmentManager asm; MockMasterServices(final Server server) throws IOException { - this.mfs = new MasterFileSystem(server, this, null, false); + this.mfs = new MasterFileSystem(server.getConfiguration(), server, this, null, false); this.asm = 
Mockito.mock(AssignmentManager.class); } diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestHMasterRPCException.java hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestHMasterRPCException.java index 956f21d..1cd353d 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestHMasterRPCException.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestHMasterRPCException.java @@ -25,6 +25,7 @@ import static org.junit.Assert.fail; import java.net.InetSocketAddress; +import com.google.inject.Injector; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.*; import org.apache.hadoop.hbase.ipc.HBaseRPC; @@ -45,7 +46,8 @@ public class TestHMasterRPCException { Configuration conf = TEST_UTIL.getConfiguration(); conf.set(HConstants.MASTER_PORT, "0"); - HMaster hm = new HMaster(conf); + Injector injector = HBaseGuiceTestUtil.createDefaultInector(); + HMaster hm = injector.getInstance(HMasterFactory.class).create(conf); ServerName sm = hm.getServerName(); InetSocketAddress isa = new InetSocketAddress(sm.getHostname(), sm.getPort()); diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java index e3e69d9..c300c0b 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java @@ -24,10 +24,18 @@ import static org.junit.Assert.assertTrue; import java.io.IOException; import java.net.InetAddress; import java.net.UnknownHostException; +import java.util.Map; +import com.google.inject.AbstractModule; +import com.google.inject.Guice; +import com.google.inject.Injector; +import com.google.inject.Module; +import com.google.inject.assistedinject.Assisted; +import com.google.inject.assistedinject.FactoryModuleBuilder; import org.apache.hadoop.conf.Configuration; import 
org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.DeserializationException; +import org.apache.hadoop.hbase.HBaseGuiceTestUtil; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; @@ -36,16 +44,28 @@ import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.ZooKeeperConnectionException; import org.apache.hadoop.hbase.catalog.CatalogTracker; +import org.apache.hadoop.hbase.catalog.CatalogTrackerFactory; import org.apache.hadoop.hbase.client.HConnection; import org.apache.hadoop.hbase.client.HConnectionTestingUtility; import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.executor.ExecutorServiceFactory; +import org.apache.hadoop.hbase.ipc.RpcServerFactory; +import org.apache.hadoop.hbase.master.balancer.LoadBalancerFactory; +import org.apache.hadoop.hbase.master.metrics.MasterMetricsFactory; +import org.apache.hadoop.hbase.metrics.MBeanSource; import org.apache.hadoop.hbase.regionserver.RegionOpeningState; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.FSTableDescriptorsFactory; +import org.apache.hadoop.hbase.util.SleeperFactory; import org.apache.hadoop.hbase.util.Threads; +import org.apache.hadoop.hbase.zookeeper.ClusterStatusTrackerFactory; +import org.apache.hadoop.hbase.zookeeper.DrainingServerTrackerFactory; +import org.apache.hadoop.hbase.zookeeper.RegionServerTrackerFactory; import org.apache.hadoop.hbase.zookeeper.RootRegionTracker; import org.apache.hadoop.hbase.zookeeper.ZKUtil; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; import org.apache.hadoop.hbase.MediumTests; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcherFactory; import org.apache.zookeeper.KeeperException; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair; @@ -62,6 +82,8 @@ import 
org.junit.experimental.categories.Category; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import javax.inject.Inject; + /** * Standup the master and fake it to test various aspects of master function. * Does NOT spin up a mini hbase nor mini dfs cluster testing master (it does @@ -118,12 +140,134 @@ public class TestMasterNoCluster { @Test (timeout=30000) public void testStopDuringStart() throws IOException, KeeperException, InterruptedException { - HMaster master = new HMaster(TESTUTIL.getConfiguration()); + + Injector injector = HBaseGuiceTestUtil.createDefaultInector(); + HMaster master = injector.getInstance(HMasterFactory.class).create(TESTUTIL.getConfiguration()); master.start(); // Immediately have it stop. We used hang in assigning root. master.stopMaster(); master.join(); } + public static class MockServerManagerFactory implements ServerManagerFactory { + + @Override + public ServerManager create(Server master, + MasterServices services, + boolean connect) throws IOException { + ServerManager spy = Mockito.spy(new ServerManager(master, services, connect)); + // Fake a successful open. + Mockito.doReturn(RegionOpeningState.OPENED).when(spy). 
+ sendRegionOpen((ServerName)Mockito.any(), (HRegionInfo)Mockito.any(), + Mockito.anyInt()); + return spy; + } + } + + public static class MockCatalogTrackerFactory implements CatalogTrackerFactory { + + private MockRegionServer rs0; + + public MockCatalogTrackerFactory(MockRegionServer rs0) { + this.rs0 = rs0; + } + + @Override + public CatalogTracker create(ZooKeeperWatcher zk, + Configuration conf, + Abortable abortable, + int defaultTimeout) throws IOException { + HConnection connection = + HConnectionTestingUtility.getMockedConnectionAndDecorate(TESTUTIL.getConfiguration(), + rs0, rs0, rs0.getServerName(), HRegionInfo.ROOT_REGIONINFO); + return new CatalogTracker(zk, conf, connection, abortable, defaultTimeout); + } + } + + public static class MockInetHMaster extends HMaster { + private ServerName [] sns; + + /** + * Initializes the HMaster. The steps are as follows: + *

+ *

+ * <ol>
+ * <li>Initialize HMaster RPC and address</li>
+ * <li>Connect to ZooKeeper.</li>
+ * </ol>
+ *

+ * Remaining steps of initialization occur in {@link #run()} so that they run in their own thread + * rather than within the context of the constructor. + * + * @throws InterruptedException + */ + @Inject + public MockInetHMaster(@Assisted final Configuration conf, + final RpcServerFactory rpcServerFactory, + final SleeperFactory sleeperFactory, + final ZooKeeperWatcherFactory zooKeeperWatcherFactory, + final MasterMetricsFactory masterMetricsFactory, + final ClusterStatusTrackerFactory clusterStatusTrackerFactory, + final ActiveMasterManagerFactory activeMasterManagerFactory, + final CatalogTrackerFactory catalogTrackerFactory, + final AssignmentManagerFactory assignmentManagerFactory, + final RegionServerTrackerFactory regionServerTrackerFactory, + final DrainingServerTrackerFactory drainingServerTrackerFactory, + final LoadBalancerFactory loadBalancerFactory, + final MasterFileSystemFactory masterFileSystemFactory, + final FSTableDescriptorsFactory fsTableDescriptorsFactory, + final ExecutorServiceFactory executorServiceFactory, + final ServerManagerFactory serverManagerFactory, + final MasterCoprocessorHostFactory masterCoprocessorHostFactory, + final CatalogJanitorFactory catalogJanitorFactory, + final MBeanSource mBeanSource) + throws IOException, KeeperException, InterruptedException { + super(conf, + rpcServerFactory, + sleeperFactory, + zooKeeperWatcherFactory, + masterMetricsFactory, + clusterStatusTrackerFactory, + activeMasterManagerFactory, + catalogTrackerFactory, + assignmentManagerFactory, + regionServerTrackerFactory, + drainingServerTrackerFactory, + loadBalancerFactory, + masterFileSystemFactory, + fsTableDescriptorsFactory, + executorServiceFactory, + serverManagerFactory, + masterCoprocessorHostFactory, + catalogJanitorFactory, + mBeanSource); + } + + InetAddress getRemoteInetAddress(final int port, final long serverStartCode) + throws UnknownHostException { + // Return different address dependent on port passed. 
+ ServerName sn = sns[port]; + return InetAddress.getByAddress(sn.getHostname(), + new byte [] {10, 0, 0, (byte)sn.getPort()}); + } + + public void setSns(ServerName[] sns) { + this.sns = sns; + } + } + + public static class FailoverTestModule extends AbstractModule { + private MockRegionServer rs0; + + public FailoverTestModule(MockRegionServer rs0) { + this.rs0 = rs0; + } + + @Override + protected void configure() { + bind(ServerManagerFactory.class).to(MockServerManagerFactory.class); + MockCatalogTrackerFactory mockCatalogTrackerFactory = new MockCatalogTrackerFactory(rs0); + bind(CatalogTrackerFactory.class).toInstance(mockCatalogTrackerFactory); + install(new FactoryModuleBuilder() + .implement(HMaster.class, MockInetHMaster.class) + .build(HMasterFactory.class)); + } + } /** * Test master failover. @@ -169,46 +313,18 @@ public class TestMasterNoCluster { }; rs1.setNextResults(HRegionInfo.FIRST_META_REGIONINFO.getRegionName(), results); + Map moduleMap = HBaseGuiceTestUtil.makeDefaultModules(); + moduleMap.remove("HMaster"); + moduleMap.remove("CatalogTracker"); + moduleMap.remove("ServerManager"); + moduleMap.put("TEST", new FailoverTestModule(rs0)); + Injector inj = Guice.createInjector(moduleMap.values()); // Create master. Subclass to override a few methods so we can insert mocks // and get notification on transitions. We need to fake out any rpcs the // master does opening/closing regions. Also need to fake out the address // of the 'remote' mocked up regionservers. - HMaster master = new HMaster(conf) { - InetAddress getRemoteInetAddress(final int port, final long serverStartCode) - throws UnknownHostException { - // Return different address dependent on port passed. 
- ServerName sn = sns[port]; - return InetAddress.getByAddress(sn.getHostname(), - new byte [] {10, 0, 0, (byte)sn.getPort()}); - } - - @Override - ServerManager createServerManager(Server master, MasterServices services) - throws IOException { - ServerManager sm = super.createServerManager(master, services); - // Spy on the created servermanager - ServerManager spy = Mockito.spy(sm); - // Fake a successful open. - Mockito.doReturn(RegionOpeningState.OPENED).when(spy). - sendRegionOpen((ServerName)Mockito.any(), (HRegionInfo)Mockito.any(), - Mockito.anyInt()); - return spy; - } - - @Override - CatalogTracker createCatalogTracker(ZooKeeperWatcher zk, - Configuration conf, Abortable abortable, int defaultTimeout) - throws IOException { - // Insert a mock for the connection used by the CatalogTracker. Any - // regionserver should do. Use TESTUTIL.getConfiguration rather than - // the conf from the master; the conf will already have an HConnection - // associate so the below mocking of a connection will fail. - HConnection connection = - HConnectionTestingUtility.getMockedConnectionAndDecorate(TESTUTIL.getConfiguration(), - rs0, rs0, rs0.getServerName(), HRegionInfo.ROOT_REGIONINFO); - return new CatalogTracker(zk, conf, connection, abortable, defaultTimeout); - } - }; + HMaster master = inj.getInstance(HMasterFactory.class).create(conf); + ((MockInetHMaster) master).setSns(sns); master.start(); try { @@ -252,49 +368,23 @@ public class TestMasterNoCluster { final long now = System.currentTimeMillis(); // Name for our single mocked up regionserver. final ServerName sn = new ServerName("0.example.org", 0, now); + final ServerName [] sns = new ServerName [] {sn }; // Here is our mocked up regionserver. Create it now. Need it setting up // master next. 
final MockRegionServer rs0 = new MockRegionServer(conf, sn); + Map moduleMap = HBaseGuiceTestUtil.makeDefaultModules(); + moduleMap.remove("HMaster"); + moduleMap.remove("CatalogTracker"); + moduleMap.remove("ServerManager"); + moduleMap.put("TEST", new FailoverTestModule(rs0)); + Injector inj = Guice.createInjector(moduleMap.values()); // Create master. Subclass to override a few methods so we can insert mocks // and get notification on transitions. We need to fake out any rpcs the // master does opening/closing regions. Also need to fake out the address // of the 'remote' mocked up regionservers. - HMaster master = new HMaster(conf) { - InetAddress getRemoteInetAddress(final int port, final long serverStartCode) - throws UnknownHostException { - // Interject an unchecked, nonsense InetAddress; i.e. no resolve. - return InetAddress.getByAddress(rs0.getServerName().getHostname(), - new byte [] {10, 0, 0, 0}); - } - - @Override - ServerManager createServerManager(Server master, MasterServices services) - throws IOException { - ServerManager sm = super.createServerManager(master, services); - // Spy on the created servermanager - ServerManager spy = Mockito.spy(sm); - // Fake a successful open. - Mockito.doReturn(RegionOpeningState.OPENED).when(spy). - sendRegionOpen((ServerName)Mockito.any(), (HRegionInfo)Mockito.any(), - Mockito.anyInt()); - return spy; - } - - @Override - CatalogTracker createCatalogTracker(ZooKeeperWatcher zk, - Configuration conf, Abortable abortable, int defaultTimeout) - throws IOException { - // Insert a mock for the connection used by the CatalogTracker. Use - // TESTUTIL.getConfiguration rather than the conf from the master; the - // conf will already have an HConnection associate so the below mocking - // of a connection will fail. 
- HConnection connection = - HConnectionTestingUtility.getMockedConnectionAndDecorate(TESTUTIL.getConfiguration(), - rs0, rs0, rs0.getServerName(), HRegionInfo.ROOT_REGIONINFO); - return new CatalogTracker(zk, conf, connection, abortable, defaultTimeout); - } - }; + HMaster master = inj.getInstance(HMasterFactory.class).create(conf); + ((MockInetHMaster) master).setSns(sns); master.start(); LOG.info("Master has started"); diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/OOMERegionServer.java hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/OOMERegionServer.java index fa177ae..dca839c 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/OOMERegionServer.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/OOMERegionServer.java @@ -23,8 +23,15 @@ import java.io.IOException; import java.util.ArrayList; import java.util.List; -import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.replication.regionserver.ReplicationFactory; +import org.apache.hadoop.hbase.ipc.RpcServerFactory; +import org.apache.hadoop.hbase.util.SleeperFactory; +import org.apache.hadoop.hbase.catalog.CatalogTrackerFactory; +import org.apache.hadoop.hbase.zookeeper.ClusterStatusTrackerFactory; +import org.apache.hadoop.hbase.zookeeper.MasterAddressTrackerFactory; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcherFactory; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.RequestConverter; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutateRequest; @@ -41,8 +48,42 @@ import com.google.protobuf.ServiceException; public class OOMERegionServer extends HRegionServer { private List retainer = new ArrayList(); - public OOMERegionServer(HBaseConfiguration conf) throws IOException, InterruptedException { - super(conf); + /** + * Starts a 
HRegionServer at the default location + * + * @param conf + * @throws java.io.IOException + * @throws InterruptedException + */ + public OOMERegionServer(Configuration conf, + SleeperFactory sleeperFactory, + RpcServerFactory rpcServerFactory, + ReplicationFactory replicationFactory, + RegionServerAccounting rsAccounting, + ZooKeeperWatcherFactory zooKeeperWatcherFactory, + MasterAddressTrackerFactory masterAddressTrackerFactory, + ClusterStatusTrackerFactory clusterStatusTrackerFactory, + CatalogTrackerFactory catalogTrackerFactory, + MemStoreFlusherFactory memStoreFlusherFactory, + CompactSplitThreadFactory compactSplitThreadFactory, + CompactionCheckerFactory compactionCheckerFactory, + LeasesFactory leasesFactory, + HRegionThriftServerFactory hRegionThriftServerFactory) + throws IOException, InterruptedException { + super(conf, + sleeperFactory, + rpcServerFactory, + replicationFactory, + rsAccounting, + zooKeeperWatcherFactory, + masterAddressTrackerFactory, + clusterStatusTrackerFactory, + catalogTrackerFactory, + memStoreFlusherFactory, + compactSplitThreadFactory, + compactionCheckerFactory, + leasesFactory, + hRegionThriftServerFactory); } public void put(byte [] regionName, Put put) diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRSKilledWhenMasterInitializing.java hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRSKilledWhenMasterInitializing.java index d77f3d8..bd5e8e8 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRSKilledWhenMasterInitializing.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRSKilledWhenMasterInitializing.java @@ -37,21 +37,39 @@ import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.MiniHBaseCluster; import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.catalog.CatalogTrackerFactory; import org.apache.hadoop.hbase.client.HBaseAdmin; 
import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.ResultScanner; import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.executor.ExecutorServiceFactory; +import org.apache.hadoop.hbase.ipc.RpcServerFactory; +import org.apache.hadoop.hbase.master.ActiveMasterManagerFactory; +import org.apache.hadoop.hbase.master.AssignmentManagerFactory; +import org.apache.hadoop.hbase.master.CatalogJanitorFactory; import org.apache.hadoop.hbase.master.HMaster; +import org.apache.hadoop.hbase.master.MasterCoprocessorHostFactory; import org.apache.hadoop.hbase.master.MasterFileSystem; +import org.apache.hadoop.hbase.master.MasterFileSystemFactory; import org.apache.hadoop.hbase.master.ServerManager; +import org.apache.hadoop.hbase.master.ServerManagerFactory; import org.apache.hadoop.hbase.master.TestMasterFailover; +import org.apache.hadoop.hbase.master.balancer.LoadBalancerFactory; +import org.apache.hadoop.hbase.master.metrics.MasterMetricsFactory; +import org.apache.hadoop.hbase.metrics.MBeanSource; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.FSTableDescriptorsFactory; import org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread; +import org.apache.hadoop.hbase.util.SleeperFactory; import org.apache.hadoop.hbase.util.Threads; +import org.apache.hadoop.hbase.zookeeper.ClusterStatusTrackerFactory; +import org.apache.hadoop.hbase.zookeeper.DrainingServerTrackerFactory; +import org.apache.hadoop.hbase.zookeeper.RegionServerTrackerFactory; import org.apache.hadoop.hbase.zookeeper.ZKAssign; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; import org.apache.hadoop.hbase.LargeTests; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcherFactory; import org.apache.zookeeper.KeeperException; import org.junit.AfterClass; import org.junit.BeforeClass; @@ -94,9 +112,45 @@ public class TestRSKilledWhenMasterInitializing { public static class 
TestingMaster extends HMaster { private boolean logSplit = false; - public TestingMaster(Configuration conf) throws IOException, - KeeperException, InterruptedException { - super(conf); + public TestingMaster(final Configuration conf, + final RpcServerFactory rpcServerFactory, + final SleeperFactory sleeperFactory, + final ZooKeeperWatcherFactory zooKeeperWatcherFactory, + final MasterMetricsFactory masterMetricsFactory, + final ClusterStatusTrackerFactory clusterStatusTrackerFactory, + final ActiveMasterManagerFactory activeMasterManagerFactory, + final CatalogTrackerFactory catalogTrackerFactory, + final AssignmentManagerFactory assignmentManagerFactory, + final RegionServerTrackerFactory regionServerTrackerFactory, + final DrainingServerTrackerFactory drainingServerTrackerFactory, + final LoadBalancerFactory loadBalancerFactory, + final MasterFileSystemFactory masterFileSystemFactory, + final FSTableDescriptorsFactory fsTableDescriptorsFactory, + final ExecutorServiceFactory executorServiceFactory, + final ServerManagerFactory serverManagerFactory, + final MasterCoprocessorHostFactory masterCoprocessorHostFactory, + final CatalogJanitorFactory catalogJanitorFactory, + final MBeanSource mBeanSource) + throws IOException, KeeperException, InterruptedException { + super(conf, + rpcServerFactory, + sleeperFactory, + zooKeeperWatcherFactory, + masterMetricsFactory, + clusterStatusTrackerFactory, + activeMasterManagerFactory, + catalogTrackerFactory, + assignmentManagerFactory, + regionServerTrackerFactory, + drainingServerTrackerFactory, + loadBalancerFactory, + masterFileSystemFactory, + fsTableDescriptorsFactory, + executorServiceFactory, + serverManagerFactory, + masterCoprocessorHostFactory, + catalogJanitorFactory, + mBeanSource); } @Override diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRpcMetrics.java hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRpcMetrics.java index 0a90f0f..23f3074 100644 --- 
hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRpcMetrics.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRpcMetrics.java @@ -23,13 +23,20 @@ package org.apache.hadoop.hbase.regionserver; import java.io.IOException; import java.util.HashMap; import java.util.Map; -import java.util.Random; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.MediumTests; +import org.apache.hadoop.hbase.HBaseGuiceTestUtil; +import org.apache.hadoop.hbase.replication.regionserver.ReplicationFactory; +import org.apache.hadoop.hbase.ipc.RpcServerFactory; +import org.apache.hadoop.hbase.util.SleeperFactory; +import org.apache.hadoop.hbase.catalog.CatalogTrackerFactory; +import org.apache.hadoop.hbase.zookeeper.ClusterStatusTrackerFactory; +import org.apache.hadoop.hbase.zookeeper.MasterAddressTrackerFactory; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcherFactory; import org.apache.hadoop.hbase.ipc.HBaseRpcMetrics; import org.apache.hadoop.metrics.ContextFactory; import org.apache.hadoop.metrics.MetricsContext; @@ -57,11 +64,43 @@ public class TestRpcMetrics { */ public static class TestRegionServer extends HRegionServer { - public TestRegionServer(Configuration conf) - throws IOException, InterruptedException { - super(conf); - // register custom metrics interface + /** + * Starts a HRegionServer at the default location + * + * @param conf + * @throws java.io.IOException + * @throws InterruptedException + */ + public TestRegionServer(Configuration conf, + SleeperFactory sleeperFactory, + RpcServerFactory rpcServerFactory, + ReplicationFactory replicationFactory, + RegionServerAccounting rsAccounting, + ZooKeeperWatcherFactory zooKeeperWatcherFactory, + MasterAddressTrackerFactory masterAddressTrackerFactory, + ClusterStatusTrackerFactory clusterStatusTrackerFactory, + 
CatalogTrackerFactory catalogTrackerFactory, + MemStoreFlusherFactory memStoreFlusherFactory, + CompactSplitThreadFactory compactSplitThreadFactory, + CompactionCheckerFactory compactionCheckerFactory, + LeasesFactory leasesFactory, + HRegionThriftServerFactory hRegionThriftServerFactory) + throws IOException, InterruptedException { + super(conf, + sleeperFactory, + rpcServerFactory, + replicationFactory, + rsAccounting, + zooKeeperWatcherFactory, + masterAddressTrackerFactory, + clusterStatusTrackerFactory, + catalogTrackerFactory, + memStoreFlusherFactory, + compactSplitThreadFactory, + compactionCheckerFactory, + leasesFactory, + hRegionThriftServerFactory); getRpcMetrics().createMetrics(new Class[]{TestMetrics.class}, true); } @@ -120,7 +159,8 @@ public class TestRpcMetrics { @Test public void testCustomMetrics() throws Exception { TEST_UTIL.getConfiguration().setInt("hbase.regionserver.port", 0); - TestRegionServer rs = new TestRegionServer(TEST_UTIL.getConfiguration()); + TestRegionServer rs = HBaseGuiceTestUtil.createDefaultInector() + .getInstance(TestRegionServer.class); rs.incTest(5); // wait for metrics context update diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java index 765115a..1cc99fb 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java @@ -32,19 +32,37 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.*; +import org.apache.hadoop.hbase.catalog.CatalogTrackerFactory; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.client.HTable; import 
org.apache.hadoop.hbase.executor.EventHandler.EventType; +import org.apache.hadoop.hbase.executor.ExecutorServiceFactory; +import org.apache.hadoop.hbase.ipc.RpcServerFactory; +import org.apache.hadoop.hbase.master.ActiveMasterManagerFactory; +import org.apache.hadoop.hbase.master.AssignmentManagerFactory; +import org.apache.hadoop.hbase.master.CatalogJanitorFactory; import org.apache.hadoop.hbase.master.HMaster; +import org.apache.hadoop.hbase.master.MasterCoprocessorHostFactory; +import org.apache.hadoop.hbase.master.MasterFileSystemFactory; +import org.apache.hadoop.hbase.master.ServerManagerFactory; +import org.apache.hadoop.hbase.master.balancer.LoadBalancerFactory; import org.apache.hadoop.hbase.master.handler.SplitRegionHandler; +import org.apache.hadoop.hbase.master.metrics.MasterMetricsFactory; +import org.apache.hadoop.hbase.metrics.MBeanSource; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.FSTableDescriptorsFactory; import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread; +import org.apache.hadoop.hbase.util.SleeperFactory; import org.apache.hadoop.hbase.util.Threads; +import org.apache.hadoop.hbase.zookeeper.ClusterStatusTrackerFactory; +import org.apache.hadoop.hbase.zookeeper.DrainingServerTrackerFactory; +import org.apache.hadoop.hbase.zookeeper.RegionServerTrackerFactory; import org.apache.hadoop.hbase.zookeeper.ZKAssign; import org.apache.hadoop.hbase.zookeeper.ZKUtil; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcherFactory; import org.apache.zookeeper.KeeperException; import org.apache.zookeeper.KeeperException.NodeExistsException; import org.apache.zookeeper.data.Stat; @@ -722,9 +740,45 @@ public class TestSplitTransactionOnCluster { public static class MockMasterWithoutCatalogJanitor extends HMaster { - public MockMasterWithoutCatalogJanitor(Configuration conf) throws 
IOException, KeeperException, - InterruptedException { - super(conf); + public MockMasterWithoutCatalogJanitor(final Configuration conf, + final RpcServerFactory rpcServerFactory, + final SleeperFactory sleeperFactory, + final ZooKeeperWatcherFactory zooKeeperWatcherFactory, + final MasterMetricsFactory masterMetricsFactory, + final ClusterStatusTrackerFactory clusterStatusTrackerFactory, + final ActiveMasterManagerFactory activeMasterManagerFactory, + final CatalogTrackerFactory catalogTrackerFactory, + final AssignmentManagerFactory assignmentManagerFactory, + final RegionServerTrackerFactory regionServerTrackerFactory, + final DrainingServerTrackerFactory drainingServerTrackerFactory, + final LoadBalancerFactory loadBalancerFactory, + final MasterFileSystemFactory masterFileSystemFactory, + final FSTableDescriptorsFactory fsTableDescriptorsFactory, + final ExecutorServiceFactory executorServiceFactory, + final ServerManagerFactory serverManagerFactory, + final MasterCoprocessorHostFactory masterCoprocessorHostFactory, + final CatalogJanitorFactory catalogJanitorFactory, + final MBeanSource mBeanSource) + throws IOException, KeeperException, InterruptedException { + super(conf, + rpcServerFactory, + sleeperFactory, + zooKeeperWatcherFactory, + masterMetricsFactory, + clusterStatusTrackerFactory, + activeMasterManagerFactory, + catalogTrackerFactory, + assignmentManagerFactory, + regionServerTrackerFactory, + drainingServerTrackerFactory, + loadBalancerFactory, + masterFileSystemFactory, + fsTableDescriptorsFactory, + executorServiceFactory, + serverManagerFactory, + masterCoprocessorHostFactory, + catalogJanitorFactory, + mBeanSource); } protected void startCatalogJanitorChore() { diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/replication/ReplicationSourceDummy.java hbase-server/src/test/java/org/apache/hadoop/hbase/replication/ReplicationSourceDummy.java index 2daf643..b6b46e6 100644 --- 
hbase-server/src/test/java/org/apache/hadoop/hbase/replication/ReplicationSourceDummy.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/replication/ReplicationSourceDummy.java @@ -38,14 +38,6 @@ public class ReplicationSourceDummy implements ReplicationSourceInterface { String peerClusterId; Path currentPath; - @Override - public void init(Configuration conf, FileSystem fs, - ReplicationSourceManager manager, Stoppable stopper, - AtomicBoolean replicating, String peerClusterId) - throws IOException { - this.manager = manager; - this.peerClusterId = peerClusterId; - } @Override public void enqueueLog(Path log) { diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSink.java hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSink.java index 18eb530..3af5eb7 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSink.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSink.java @@ -36,6 +36,8 @@ import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.regionserver.wal.HLog; import org.apache.hadoop.hbase.regionserver.wal.HLogKey; import org.apache.hadoop.hbase.regionserver.wal.WALEdit; +import org.apache.hadoop.hbase.replication.regionserver.metrics.ReplicationMetricsSourceImpl; +import org.apache.hadoop.hbase.replication.regionserver.metrics.ReplicationSinkMetrics; import org.apache.hadoop.hbase.util.Bytes; import org.junit.AfterClass; import org.junit.Before; @@ -89,7 +91,7 @@ public class TestReplicationSink { TEST_UTIL.getConfiguration().setBoolean(HConstants.REPLICATION_ENABLE_KEY, true); TEST_UTIL.startMiniCluster(3); SINK = - new ReplicationSink(new Configuration(TEST_UTIL.getConfiguration()), STOPPABLE); + new ReplicationSink(new Configuration(TEST_UTIL.getConfiguration()), new ReplicationSinkMetrics(new ReplicationMetricsSourceImpl())); table1 = 
TEST_UTIL.createTable(TABLE_NAME1, FAM_NAME1); table2 = TEST_UTIL.createTable(TABLE_NAME2, FAM_NAME2); } diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java index 5828154..148db1d 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java @@ -24,7 +24,13 @@ import static org.junit.Assert.assertEquals; import java.net.URLEncoder; import java.util.ArrayList; import java.util.List; +import java.util.Map; +import com.google.inject.AbstractModule; +import com.google.inject.Guice; +import com.google.inject.Injector; +import com.google.inject.Module; +import com.google.inject.assistedinject.FactoryModuleBuilder; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; @@ -41,6 +47,7 @@ import org.apache.hadoop.hbase.MediumTests; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.catalog.CatalogTracker; +import org.apache.hadoop.hbase.HBaseGuiceTestUtil; import org.apache.hadoop.hbase.regionserver.wal.HLog; import org.apache.hadoop.hbase.regionserver.wal.HLogKey; import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener; @@ -93,6 +100,16 @@ public class TestReplicationSourceManager { private static Path logDir; + public static class ReplicationSourceDummyModule extends AbstractModule { + + @Override + protected void configure() { + install(new FactoryModuleBuilder() + .implement(ReplicationSourceInterface.class, ReplicationSourceDummy.class) + .build(ReplicationSourceFactory.class)); + } + } + @BeforeClass public static void setUpBeforeClass() throws Exception { @@ -103,6 +120,7 @@ 
public class TestReplicationSourceManager { conf.setBoolean(HConstants.REPLICATION_ENABLE_KEY, true); utility = new HBaseTestingUtility(conf); utility.startMiniZKCluster(); + //utility.startMiniCluster(); zkw = new ZooKeeperWatcher(conf, "test", null); ZKUtil.createWithParents(zkw, "/hbase/replication"); @@ -116,13 +134,19 @@ public class TestReplicationSourceManager { ZKUtil.createWithParents(zkw, "/hbase/replication/state"); ZKUtil.setData(zkw, "/hbase/replication/state", Bytes.toBytes("true")); - replication = new Replication(new DummyServer(), fs, logDir, oldLogDir); - manager = replication.getReplicationManager(); fs = FileSystem.get(conf); - oldLogDir = new Path(utility.getDataTestDir(), - HConstants.HREGION_OLDLOGDIR_NAME); - logDir = new Path(utility.getDataTestDir(), - HConstants.HREGION_LOGDIR_NAME); + oldLogDir = new Path(utility.getDataTestDir(), HConstants.HREGION_OLDLOGDIR_NAME); + logDir = new Path(utility.getDataTestDir(), HConstants.HREGION_LOGDIR_NAME); + + Map modules = HBaseGuiceTestUtil.makeDefaultModules(); + modules.put("ReplicationSource", new ReplicationSourceDummyModule()); + + Injector injector = Guice.createInjector(modules.values()); + + replication = injector.getInstance(ReplicationFactory.class) + .create(new DummyServer(), fs, logDir, oldLogDir); + manager = replication.getReplicationManager(); + manager.addSource(slaveId); diff --git pom.xml pom.xml index 7a8a643..6c5193f 100644 --- pom.xml +++ pom.xml @@ -829,6 +829,7 @@ 2.3.1 1.3.1 3.5.0.Final-SNAPSHOT + 3.0 2.3 1.6 @@ -903,6 +904,16 @@ + com.google.inject + guice + ${guice.version} + + + com.google.inject.extensions + guice-assistedinject + ${guice.version} + + io.netty netty ${netty.version}