Index: src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java IDEA additional info: Subsystem: com.intellij.openapi.diff.impl.patch.CharsetEP <+>UTF-8 =================================================================== --- src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java (revision 1338023) +++ src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java (revision ) @@ -202,6 +202,7 @@ import org.apache.hadoop.hbase.zookeeper.ZKUtil; import org.apache.hadoop.hbase.zookeeper.ZooKeeperNodeTracker; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.hadoop.hbase.zookeeper.ClusterConfigTracker; import org.apache.hadoop.io.Writable; import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.metrics.util.MBeanUtil; @@ -387,6 +388,9 @@ // Log Splitting Worker private SplitLogWorker splitLogWorker; + private ClusterConfigTracker clusterConfigTracker; + private boolean dynamicConfigEnabled = false; + // A sleeper that sleeps for msgInterval. private final Sleeper sleeper; @@ -434,7 +438,9 @@ public HRegionServer(Configuration conf) throws IOException, InterruptedException { this.fsOk = true; - this.conf = conf; + //this.conf = conf; + this.zooKeeper = new ZooKeeperWatcher(conf, REGIONSERVER, this); + this.conf = getClusterConfiguration(conf); // Set how many times to retry talking to another server over HConnection. HConnectionManager.setServerSideHConnectionRetries(this.conf, LOG); this.isOnline = false; @@ -503,6 +509,22 @@ cacheConfig = new CacheConfig(conf); } + private Configuration getClusterConfiguration(Configuration configuration) { + dynamicConfigEnabled = configuration.getBoolean( + "hbase.dynamic.configuration.enabled", false); + if (!dynamicConfigEnabled) { + LOG.info("HBase runtime dynamic cluster configuration is disabled. 
Configuration" + + "changes require a config file change and cluster restart."); + return configuration; + } + LOG.info("HBase runtime dynamic cluster configuration is enabled. Configuration changes" + + " can be done through the shell and does not require cluster restart"); + // Initialize cluster config tracker + this.clusterConfigTracker = new ClusterConfigTracker(getZooKeeper(), this, configuration); + this.clusterConfigTracker.start(); + return clusterConfigTracker.getClusterConfiguration(); + } + /** * Run test on configured codecs to make sure supporting libs are in place. * @param c @@ -647,8 +669,8 @@ */ private void initializeZooKeeper() throws IOException, InterruptedException { // Open connection to zookeeper and set primary watcher - this.zooKeeper = new ZooKeeperWatcher(conf, REGIONSERVER + ":" + - this.isa.getPort(), this); + //this.zooKeeper = new ZooKeeperWatcher(conf, REGIONSERVER + ":" + + // this.isa.getPort(), this); // Create the master address manager, register with zk, and start it. Then // block until a master is available. 
No point in starting up if no master Index: src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java IDEA additional info: Subsystem: com.intellij.openapi.diff.impl.patch.CharsetEP <+>UTF-8 =================================================================== --- src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java (revision 1338023) +++ src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java (revision ) @@ -19,13 +19,6 @@ */ package org.apache.hadoop.hbase.zookeeper; -import java.io.Closeable; -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; -import java.util.concurrent.CopyOnWriteArrayList; -import java.util.concurrent.CountDownLatch; - import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; @@ -41,6 +34,13 @@ import org.apache.zookeeper.ZooDefs; import org.apache.zookeeper.data.ACL; +import java.io.Closeable; +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.CountDownLatch; + /** * Acts as the single ZooKeeper Watcher. One instance of this is instantiated * for each Master, RegionServer, and client process. 
@@ -102,6 +102,8 @@ public String clusterIdZNode; // znode used for log splitting work assignment public String splitLogZNode; + // znode to monitor online cluster configuration changes + public String clusterConfigZNode; // Certain ZooKeeper nodes need to be world-readable public static final ArrayList CREATOR_ALL_AND_WORLD_READABLE = @@ -165,6 +167,7 @@ ZKUtil.createAndFailSilent(this, tableZNode); ZKUtil.createAndFailSilent(this, splitLogZNode); ZKUtil.createAndFailSilent(this, backupMasterAddressesZNode); + ZKUtil.createAndFailSilent(this, clusterConfigZNode); } catch (KeeperException e) { throw new ZooKeeperConnectionException( prefix("Unexpected KeeperException creating base node"), e); @@ -212,6 +215,9 @@ conf.get("zookeeper.znode.clusterId", "hbaseid")); splitLogZNode = ZKUtil.joinZNode(baseZNode, conf.get("zookeeper.znode.splitlog", HConstants.SPLIT_LOGDIR_NAME)); + clusterConfigZNode = ZKUtil.joinZNode(baseZNode, + conf.get("zookeeper.znode.cluster.config", "clusterConfig")); + } /** Index: src/main/java/org/apache/hadoop/hbase/master/HMaster.java IDEA additional info: Subsystem: com.intellij.openapi.diff.impl.patch.CharsetEP <+>UTF-8 =================================================================== --- src/main/java/org/apache/hadoop/hbase/master/HMaster.java (revision 1338023) +++ src/main/java/org/apache/hadoop/hbase/master/HMaster.java (revision ) @@ -94,6 +94,10 @@ import org.apache.hadoop.hbase.monitoring.MemoryBoundedLogMessageBuffer; import org.apache.hadoop.hbase.monitoring.MonitoredTask; import org.apache.hadoop.hbase.monitoring.TaskMonitor; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair; +import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.*; import org.apache.hadoop.hbase.replication.regionserver.Replication; import org.apache.hadoop.hbase.security.User; import 
org.apache.hadoop.hbase.util.Bytes; @@ -107,6 +111,7 @@ import org.apache.hadoop.hbase.util.VersionInfo; import org.apache.hadoop.hbase.zookeeper.ClusterId; import org.apache.hadoop.hbase.zookeeper.ClusterStatusTracker; +import org.apache.hadoop.hbase.zookeeper.ClusterConfigTracker; import org.apache.hadoop.hbase.zookeeper.DrainingServerTracker; import org.apache.hadoop.hbase.zookeeper.RegionServerTracker; import org.apache.hadoop.hbase.zookeeper.ZKUtil; @@ -193,6 +198,9 @@ private CatalogTracker catalogTracker; // Cluster status zk tracker and local setter private ClusterStatusTracker clusterStatusTracker; + // Cluster config tracker + private ClusterConfigTracker clusterConfigTracker = null; + private boolean dynamicConfigEnabled = false; // buffer for "fatal error" notices from region servers // in the cluster. This is only used for assisting @@ -252,7 +260,9 @@ */ public HMaster(final Configuration conf) throws IOException, KeeperException, InterruptedException { - this.conf = new Configuration(conf); + + this.zooKeeper = new ZooKeeperWatcher(conf, MASTER, this, true); + this.conf = getClusterConfiguration(conf); // Disable the block cache on the master this.conf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0.0f); // Set how many times to retry talking to another server over HConnection. @@ -306,6 +316,22 @@ this.msgInterval = conf.getInt("hbase.regionserver.msginterval", 3 * 1000); } + private Configuration getClusterConfiguration(Configuration configuration) { + dynamicConfigEnabled = configuration.getBoolean( + "hbase.dynamic.configuration.enabled", false); + if (!dynamicConfigEnabled) { + LOG.info("HBase runtime dynamic cluster configuration is disabled. Configuration " + + "changes require a config file change and cluster restart."); + return configuration; + } + LOG.info("HBase runtime dynamic cluster configuration is enabled. 
Configuration changes" + + " can be done through the shell and does not require cluster restart"); + // Initialize cluster config tracker + this.clusterConfigTracker = new ClusterConfigTracker(getZooKeeper(), this, configuration); + this.clusterConfigTracker.start(); + return clusterConfigTracker.getClusterConfiguration(); + } + /** * Stall startup if we are designated a backup master; i.e. we want someone * else to become the master before proceeding. @@ -1579,6 +1605,11 @@ IOException, KeeperException, ExecutionException { this.zooKeeper.reconnectAfterExpiration(); + // Re initialize clusterConfigTracker if required. + if (dynamicConfigEnabled && clusterConfigTracker != null) { + this.clusterConfigTracker = new ClusterConfigTracker(getZooKeeper(), this, conf); + this.clusterConfigTracker.start(); + } Callable callable = new Callable () { public Boolean call() throws InterruptedException, @@ -1666,6 +1697,11 @@ } @Override + public ClusterConfigTracker getClusterConfigTracker() { + return this.clusterConfigTracker; + } + + @Override public AssignmentManager getAssignmentManager() { return this.assignmentManager; } @@ -1673,6 +1709,17 @@ public MemoryBoundedLogMessageBuffer getRegionServerFatalLogBuffer() { return rsFatals; } + + @Override + public boolean updateConfig(String configKey, String configValue) { + return this.clusterConfigTracker.updateClusterConfig(configKey, configValue); + } + + @Override + public String getConfig(String configKey) { + return this.conf.get(configKey); + } + @SuppressWarnings("deprecation") @Override Index: src/main/ruby/shell.rb IDEA additional info: Subsystem: com.intellij.openapi.diff.impl.patch.CharsetEP <+>UTF-8 =================================================================== --- src/main/ruby/shell.rb (revision 1338023) +++ src/main/ruby/shell.rb (revision ) @@ -287,6 +287,8 @@ unassign zk_dump hlog_roll + update_config + get_config ] ) Index: src/main/java/org/apache/hadoop/hbase/HBaseInMemoryConfiguration.java IDEA 
additional info: Subsystem: com.intellij.openapi.diff.impl.patch.CharsetEP <+>UTF-8 =================================================================== --- src/main/java/org/apache/hadoop/hbase/HBaseInMemoryConfiguration.java (revision ) +++ src/main/java/org/apache/hadoop/hbase/HBaseInMemoryConfiguration.java (revision ) @@ -0,0 +1,141 @@ +package org.apache.hadoop.hbase; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.zookeeper.ZKUtil; + +import java.util.*; +import java.util.concurrent.ConcurrentHashMap; + +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +public class HBaseInMemoryConfiguration extends Configuration { + + private static final Log LOG = LogFactory.getLog(HBaseInMemoryConfiguration.class); + + private ConcurrentHashMap clusterConfigMap + = new ConcurrentHashMap(100); + + public HBaseInMemoryConfiguration(Configuration configuration) { + super(configuration); + } + + public String get(String configKey) { + if (clusterConfigMap.containsKey(configKey)) { + return clusterConfigMap.get(configKey); + } + return super.get(configKey); + } + + public void put(String configKey, String configValue) { + this.clusterConfigMap.put(configKey, configValue); + } + + @Override + public long getLong(String name, long defaultValue) { + if (clusterConfigMap.containsKey(name)) { + String longValue = clusterConfigMap.get(name); + try { + return longValue == null ? defaultValue : Long.parseLong(longValue); + } catch (NumberFormatException nfe) { + return super.getLong(name, defaultValue); + } + } + return super.getLong(name, defaultValue); + } + + @Override + public int getInt(String name, int defaultValue) { + if (clusterConfigMap.containsKey(name)) { + String intValue = clusterConfigMap.get(name); + try { + return intValue == null ? defaultValue : Integer.parseInt(intValue); + } catch (NumberFormatException nfe) { + return super.getInt(name, defaultValue); + } + } + return super.getInt(name, defaultValue); + } + + @Override + public String get(String name, String defaultValue) { + if (clusterConfigMap.containsKey(name)) { + String stringValue = clusterConfigMap.get(name); + return stringValue == null ? defaultValue : stringValue; + } + return super.get(name, defaultValue); + } + + @Override + public float getFloat(String name, float defaultValue) { + if (clusterConfigMap.containsKey(name)) { + String floatValue = clusterConfigMap.get(name); + try { + return floatValue == null ? 
defaultValue : Float.parseFloat(floatValue); + } catch (NumberFormatException nfe) { + return super.getFloat(name, defaultValue); + } + } + return super.getFloat(name, defaultValue); + } + + @Override + public boolean getBoolean(String name, boolean defaultValue) { + if (clusterConfigMap.containsKey(name)) { + String booleanValue = clusterConfigMap.get(name); + if (booleanValue == null) { + return defaultValue; + } + booleanValue = booleanValue.trim(); + if ("true".equalsIgnoreCase(booleanValue)) { + return true; + } + else if ("false".equals(booleanValue)) { + return false; + } else { + return defaultValue; + } + } + return super.getBoolean(name, defaultValue); + } + + public void refreshClusterConfigMap(List clusterData) { + if (clusterData != null) { + LOG.debug("Found Cluster configuration in ZK. Refreshing the in memory configuration map from ZK"); + for (ZKUtil.NodeAndData nodeData : clusterData) { + String configNodePath = nodeData.getNode(); + String configKey = configNodePath.substring(configNodePath.lastIndexOf("/") + 1, + configNodePath.length()); + clusterConfigMap.put(configKey, Bytes.toString(nodeData.getData())); + } + LOG.debug("Refreshed cluster configuration map. Current size = " + + clusterConfigMap.size()); + } + } + + /** + * Get a read only version of the current configuration map. 
+ * @return + */ + public Map getClusterConfigMap() { + return Collections.unmodifiableMap(clusterConfigMap); + } +} Index: src/main/java/org/apache/hadoop/hbase/master/MasterServices.java IDEA additional info: Subsystem: com.intellij.openapi.diff.impl.patch.CharsetEP <+>UTF-8 =================================================================== --- src/main/java/org/apache/hadoop/hbase/master/MasterServices.java (revision 1338023) +++ src/main/java/org/apache/hadoop/hbase/master/MasterServices.java (revision ) @@ -27,6 +27,7 @@ import org.apache.hadoop.hbase.TableDescriptors; import org.apache.hadoop.hbase.executor.EventHandler; import org.apache.hadoop.hbase.executor.ExecutorService; +import org.apache.hadoop.hbase.zookeeper.ClusterConfigTracker; import org.apache.hadoop.hbase.zookeeper.RegionServerTracker; /** @@ -80,4 +81,10 @@ * @return true if master enables ServerShutdownHandler; */ public boolean isServerShutdownHandlerEnabled(); + + /** + * Return the current cluster config tracker + * @return + */ + public ClusterConfigTracker getClusterConfigTracker(); } Index: src/main/java/org/apache/hadoop/hbase/HConstants.java IDEA additional info: Subsystem: com.intellij.openapi.diff.impl.patch.CharsetEP <+>UTF-8 =================================================================== --- src/main/java/org/apache/hadoop/hbase/HConstants.java (revision 1338023) +++ src/main/java/org/apache/hadoop/hbase/HConstants.java (revision ) @@ -578,7 +578,7 @@ "hbase.regionserver.hostname.seen.by.master"; public static final String HBASE_MASTER_LOGCLEANER_PLUGINS = - "hbase.master.logcleaner.plugins"; + "hbase.master.logcleaner.plugins"; public static final String HBASE_REGION_SPLIT_POLICY_KEY = "hbase.regionserver.region.split.policy"; Index: src/test/java/org/apache/hadoop/hbase/TestHBaseDynamicConfiguration.java IDEA additional info: Subsystem: com.intellij.openapi.diff.impl.patch.CharsetEP <+>UTF-8 =================================================================== --- 
src/test/java/org/apache/hadoop/hbase/TestHBaseDynamicConfiguration.java (revision ) +++ src/test/java/org/apache/hadoop/hbase/TestHBaseDynamicConfiguration.java (revision ) @@ -0,0 +1,284 @@ +package org.apache.hadoop.hbase; + +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.master.HMaster; +import org.apache.hadoop.hbase.regionserver.HRegionServer; +import org.apache.hadoop.hbase.zookeeper.ClusterConfigTracker; +import org.codehaus.jackson.map.ObjectMapper; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +import java.io.IOException; +import java.io.StringWriter; +import java.util.Map; + +import static org.junit.Assert.*; + +@Category(MediumTests.class) +public class TestHBaseDynamicConfiguration { + + final Log LOG = LogFactory.getLog(getClass()); + protected final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); + protected static MiniHBaseCluster miniHBaseCluster = null; + protected Configuration conf; + + @Before + public void setUpBeforeClass() throws Exception { + TEST_UTIL.getConfiguration().setBoolean("hbase.dynamic.configuration.enabled", true); + miniHBaseCluster = TEST_UTIL.startMiniCluster(1,1); + } + + @After + public void tearDownAfterClass() throws Exception { + TEST_UTIL.shutdownMiniCluster(); + } + + @Test + public void testConfigurationChangesWhileMasterCrash() throws IOException, InterruptedException { + LOG.info("Start testConfigurationChangesWhileMasterCrash"); + + Configuration clusterConfig = TEST_UTIL.getConfiguration(); + int balancerPeriod = clusterConfig.getInt("hbase.balancer.period", 10); + assertEquals(balancerPeriod, 300000); + int handlerCount = clusterConfig.getInt("hbase.master.handler.count", 30); + assertEquals(handlerCount, 30); + long rsFatalsBuffer = clusterConfig.getLong("hbase.master.buffer.for.rs.fatals", 1*1024*1024); + assertEquals(rsFatalsBuffer, 1*1024*1024); + long catalogTimeout = clusterConfig.getInt("hbase.master.catalog.timeout", Integer.MAX_VALUE); + assertEquals(catalogTimeout, Integer.MAX_VALUE); + long 
catalogVerifyTimeout = clusterConfig.getInt("hbase.catalog.verification.timeout", 1000); + assertEquals(catalogVerifyTimeout, 1000); + + ClusterConfigTracker cct = miniHBaseCluster.getMaster().getClusterConfigTracker(); + updateClusterConfig(cct); + + Thread.sleep(5000); + HBaseInMemoryConfiguration masterConfig = (HBaseInMemoryConfiguration) + miniHBaseCluster.getMaster().getConfiguration(); + assertConfigChanges(masterConfig); + + TEST_UTIL.getHBaseCluster().startMaster(); + Thread.sleep(10000); + + HMaster activeMaster = TEST_UTIL.getHBaseCluster().getMaster(); + activeMaster.abort("Aborting primary master to test dynamic configuration overrides. ", + new Exception("Die now")); + Thread.sleep(15000); + + HMaster secondMaster = TEST_UTIL.getHBaseCluster().getMaster(); + HBaseInMemoryConfiguration secondMasterConfig = + (HBaseInMemoryConfiguration) secondMaster.getConfiguration(); + assertConfigChanges(secondMasterConfig); + + HRegionServer hRegionServer = TEST_UTIL.getHBaseCluster().getRegionServer(0); + HBaseInMemoryConfiguration regionConfig = + (HBaseInMemoryConfiguration) hRegionServer.getConfiguration(); + assertConfigChanges(regionConfig); + + LOG.info("End testConfigurationChangesWhileMasterCrash"); + } + + private void updateClusterConfig(ClusterConfigTracker cct) { + cct.updateClusterConfig("hbase.balancer.period", "200000"); + cct.updateClusterConfig("hbase.master.handler.count", "30"); + cct.updateClusterConfig("hbase.master.buffer.for.rs.fatals", Long.toString(2*1024*1024)); + cct.updateClusterConfig("hbase.master.catalog.timeout", Integer.toString(Integer.MAX_VALUE-100)); + cct.updateClusterConfig("hbase.catalog.verification.timeout", Long.toString(2000)); + } + + private void assertConfigChanges(Configuration configuration) { + int balancePeriod = configuration.getInt("hbase.balancer.period", 40000); + LOG.info("Balancer period from second master config = " + balancePeriod); + assertEquals(balancePeriod, 200000); + int handlerCount = 
configuration.getInt("hbase.master.handler.count", 20); + assertEquals(handlerCount, 30); + long rsFatalsBuffer = configuration.getLong("hbase.master.buffer.for.rs.fatals", 1*1024*1024); + assertEquals(rsFatalsBuffer, 2*1024*1024); + int catalogTimeout = configuration.getInt("hbase.master.catalog.timeout", Integer.MAX_VALUE); + assertEquals(catalogTimeout, Integer.MAX_VALUE-100); + int catalogVerifyTimeout = configuration.getInt("hbase.catalog.verification.timeout", 1000); + assertEquals(catalogVerifyTimeout, 2000); + } + + @Test + public void testDumpConfiguration() throws Exception { + Configuration conf = TEST_UTIL.getConfiguration(); + StringWriter outWriter = new StringWriter(); + conf.dumpConfiguration(conf, outWriter); + String jsonStr = outWriter.toString(); + ObjectMapper mapper = new ObjectMapper(); + JsonConfiguration jconf = mapper.readValue(jsonStr, JsonConfiguration.class); + JsonProperty[] props = jconf.getProperties(); + if (props != null) { + System.err.println("Props length = " + props.length); + } + System.err.println("Done"); + assertEquals("1","1"); + } + + @Test + public void testConfigMemoryOverride() throws Exception { + LOG.info("Start testConfigMemoryOverride"); + + Configuration clusterConfig = TEST_UTIL.getConfiguration(); + int balancerPeriod = clusterConfig.getInt("hbase.balancer.period", 10); + assertEquals(balancerPeriod, 300000); + ClusterConfigTracker cct = miniHBaseCluster.getMaster().getClusterConfigTracker(); + cct.updateClusterConfig("hbase.balancer.period", "200000"); + Thread.sleep(1000); + HBaseInMemoryConfiguration masterConfig = (HBaseInMemoryConfiguration) + miniHBaseCluster.getMaster().getConfiguration(); + int newBalancerPeriod = masterConfig.getInt("hbase.balancer.period", 40000); + assertEquals(newBalancerPeriod, 200000); + LOG.info("End testConfigMemoryOverride"); + + } + + + + @Test + public void testGetClusterConfigMap() throws Exception { + LOG.info("Start testGetClusterConfigMap"); + ClusterConfigTracker cct = 
miniHBaseCluster.getMaster().getClusterConfigTracker(); + Map configMap = cct.getClusterConfigMap(); + assertNotNull(configMap); + assertTrue(configMap.size() > 0); + LOG.info("End testGetClusterConfigMap"); + + } + + @Test + public void testConfigAddition() throws Exception { + LOG.info("Start testConfigAddition"); + + ClusterConfigTracker cct = miniHBaseCluster.getMaster().getClusterConfigTracker(); + int sizeBefore = cct.getClusterConfigMap().size(); + cct.createClusterConfig("hbase.config.test.TestConfigKey", "TestConfigValue"); + Thread.sleep(1000); + int sizeAfter = cct.getClusterConfigMap().size(); + assertEquals(sizeBefore + 1, sizeAfter); + String configValue = + (String) cct.getClusterConfigMap().get("hbase.config.test.TestConfigKey"); + assertEquals(configValue, "TestConfigValue"); + LOG.info("End testConfigAddition"); + + } + + @Test + public void testConfigDuplicateAddition() throws Exception { + LOG.info("Start testConfigDuplicateAddition"); + + ClusterConfigTracker cct = miniHBaseCluster.getMaster().getClusterConfigTracker(); + int sizeBefore = cct.getClusterConfigMap().size(); + cct.createClusterConfig("hbase.config.test.TestConfigKey", "TestConfigValue"); + Thread.sleep(1000); + int sizeAfter = cct.getClusterConfigMap().size(); + assertEquals(sizeAfter, sizeBefore + 1); + String configValue = + (String) cct.getClusterConfigMap().get("hbase.config.test.TestConfigKey"); + assertEquals(configValue, "TestConfigValue"); + // Now try to create the same config again. 
This should simply update the config + // value + cct.createClusterConfig("hbase.config.test.TestConfigKey", "TestConfigValue201"); + Thread.sleep(1000); + sizeAfter = cct.getClusterConfigMap().size(); + assertEquals(sizeAfter, sizeBefore + 1); + configValue = + (String) cct.getClusterConfigMap().get("hbase.config.test.TestConfigKey"); + assertEquals(configValue, "TestConfigValue201"); + LOG.info("End testConfigDuplicateAddition"); + + } + + @Test + public void testConfigUpdate() throws Exception { + LOG.info("Start testConfigUpdate"); + + ClusterConfigTracker cct = miniHBaseCluster.getMaster().getClusterConfigTracker(); + int sizeBefore = cct.getClusterConfigMap().size(); + cct.createClusterConfig("hbase.config.test.TestConfigKey", "TestConfigValue"); + Thread.sleep(1000); + int sizeAfter = cct.getClusterConfigMap().size(); + assertEquals(sizeAfter, sizeBefore + 1); + String configValue = + (String) cct.getClusterConfigMap().get("hbase.config.test.TestConfigKey"); + assertEquals(configValue, "TestConfigValue"); + sizeBefore = cct.getClusterConfigMap().size(); + cct.updateClusterConfig("hbase.config.test.TestConfigKey", "TestConfigValue101"); + Thread.sleep(1000); + sizeAfter = cct.getClusterConfigMap().size(); + // No change in config size map since we are only updating existing value + assertEquals(sizeAfter, sizeBefore); + configValue = + (String) cct.getClusterConfigMap().get("hbase.config.test.TestConfigKey"); + assertEquals(configValue, "TestConfigValue101"); + LOG.info("End testConfigUpdate"); + + } + + static class JsonConfiguration { + JsonProperty[] properties; + + public JsonProperty[] getProperties() { + return properties; + } + + public void setProperties(JsonProperty[] properties) { + this.properties = properties; + } + } + + static class JsonProperty { + String key; + public String getKey() { + return key; + } + public void setKey(String key) { + this.key = key; + } + public String getValue() { + return value; + } + public void setValue(String value) 
{ + this.value = value; + } + public boolean getIsFinal() { + return isFinal; + } + public void setIsFinal(boolean isFinal) { + this.isFinal = isFinal; + } + public String getResource() { + return resource; + } + public void setResource(String resource) { + this.resource = resource; + } + String value; + boolean isFinal; + String resource; + } + +} Index: conf/hbase-site.xml IDEA additional info: Subsystem: com.intellij.openapi.diff.impl.patch.CharsetEP <+>UTF-8 =================================================================== --- conf/hbase-site.xml (revision 1338023) +++ conf/hbase-site.xml (revision ) @@ -22,4 +22,20 @@ */ --> + + hbase.rootdir + hdfs://localhost:9000/hbase + + + hbase.cluster.distributed + true + + + hbase.dynamic.configuration.enabled + true + + Enables the dynamic cluster configuration ability. If enabled, you can make + runtime cluster configuration changes without the need for restarting the cluster. + + Index: src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java IDEA additional info: Subsystem: com.intellij.openapi.diff.impl.patch.CharsetEP <+>UTF-8 =================================================================== --- src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java (revision 1338023) +++ src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java (revision ) @@ -1813,4 +1813,24 @@ function.master.close(); } } + + + public boolean updateConfig(String configKey, String configValue) { + try { + return this.connection.getKeepAliveMaster().updateConfig(configKey, configValue); + } catch (IOException e) { + LOG.error("Could not update cluster config",e); + return false; + } + } + + public String getConfig(String configKey) { + try { + return this.connection.getKeepAliveMaster().getConfig(configKey); + } catch (IOException e) { + LOG.error("Could not get cluster config value",e); + return null; + } + } + } Index: src/main/ruby/hbase/admin.rb IDEA additional info: Subsystem: com.intellij.openapi.diff.impl.patch.CharsetEP 
<+>UTF-8 =================================================================== --- src/main/ruby/hbase/admin.rb (revision 1338023) +++ src/main/ruby/hbase/admin.rb (revision ) @@ -92,6 +92,20 @@ end #---------------------------------------------------------------------------------------------- + # Update a cluster configuration + # Returns true if update cluster config is successful. + def update_config(configKey, configValue) + @admin.updateConfig(configKey, configValue) + end + + #---------------------------------------------------------------------------------------------- + # Get current cluster configuration for a key + # Returns value of cluster config key. + def get_config(configKey) + @admin.getConfig(configKey) + end + + #---------------------------------------------------------------------------------------------- # Enables a table def enable(table_name) tableExists(table_name) Index: src/main/java/org/apache/hadoop/hbase/ipc/HMasterInterface.java IDEA additional info: Subsystem: com.intellij.openapi.diff.impl.patch.CharsetEP <+>UTF-8 =================================================================== --- src/main/java/org/apache/hadoop/hbase/ipc/HMasterInterface.java (revision 1338023) +++ src/main/java/org/apache/hadoop/hbase/ipc/HMasterInterface.java (revision ) @@ -19,19 +19,19 @@ */ package org.apache.hadoop.hbase.ipc; -import java.io.IOException; -import java.util.List; - import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.hbase.ClusterStatus; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.UnknownRegionException; -import org.apache.hadoop.hbase.security.TokenInfo; import org.apache.hadoop.hbase.security.KerberosInfo; +import org.apache.hadoop.hbase.security.TokenInfo; import org.apache.hadoop.hbase.util.Pair; +import java.io.IOException; +import java.util.List; + /** * Clients 
interact with the HMasterInterface to gain access to meta-level * HBase functionality, like finding an HRegionServer and creating/destroying @@ -266,4 +266,21 @@ * @return array of HTableDescriptor */ public HTableDescriptor[] getHTableDescriptors(List tableNames); + + /** + * Update a cluster configuration value at runtime. + * @param configKey the configuration key to update + * @param configValue the new value for the key + * @return true if the update succeeded + */ + public boolean updateConfig(String configKey, String configValue); + + /** + * Get the current value of a cluster configuration key. + * @param configKey the configuration key to look up + * @return the current value of the key, or null if it is not set + */ + public String getConfig(String configKey); + + } Index: src/main/resources/hbase-default.xml IDEA additional info: Subsystem: com.intellij.openapi.diff.impl.patch.CharsetEP <+>UTF-8 =================================================================== --- src/main/resources/hbase-default.xml (revision 1338023) +++ src/main/resources/hbase-default.xml (revision ) @@ -862,6 +862,15 @@ In both cases, the aggregated metric M across tables and cfs will be reported. + + hbase.dynamic.configuration.enabled + false + + Enables the dynamic cluster configuration ability. If enabled, you can make + runtime cluster configuration changes without the need for restarting the cluster. 
+ + + hbase.metrics.exposeOperationTimes Index: src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java IDEA additional info: Subsystem: com.intellij.openapi.diff.impl.patch.CharsetEP <+>UTF-8 =================================================================== --- src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java (revision 1338023) +++ src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java (revision ) @@ -62,6 +62,7 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Writables; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.hadoop.hbase.zookeeper.ClusterConfigTracker; import org.junit.Test; import org.junit.experimental.categories.Category; import org.mockito.Mockito; @@ -69,6 +70,7 @@ import com.google.protobuf.RpcController; import com.google.protobuf.ServiceException; + @Category(SmallTests.class) public class TestCatalogJanitor { /** @@ -280,6 +282,11 @@ @Override public boolean isServerShutdownHandlerEnabled() { return true; + } + + @Override + public ClusterConfigTracker getClusterConfigTracker() { + return null; //To change body of implemented methods use File | Settings | File Templates. } }