CREATOR_ALL_AND_WORLD_READABLE =
@@ -164,6 +166,7 @@
ZKUtil.createAndFailSilent(this, tableZNode);
ZKUtil.createAndFailSilent(this, splitLogZNode);
ZKUtil.createAndFailSilent(this, backupMasterAddressesZNode);
+ ZKUtil.createAndFailSilent(this, clusterConfigZNode);
} catch (KeeperException e) {
throw new ZooKeeperConnectionException(
prefix("Unexpected KeeperException creating base node"), e);
@@ -211,6 +214,9 @@
conf.get("zookeeper.znode.clusterId", "hbaseid"));
splitLogZNode = ZKUtil.joinZNode(baseZNode,
conf.get("zookeeper.znode.splitlog", HConstants.SPLIT_LOGDIR_NAME));
+ clusterConfigZNode = ZKUtil.joinZNode(baseZNode,
+ conf.get("zookeeper.znode.cluster.config", "clusterConfig"));
+
}
/**
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ClusterConfigTracker.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ClusterConfigTracker.java (revision 0)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ClusterConfigTracker.java (revision 0)
@@ -0,0 +1,277 @@
+package org.apache.hadoop.hbase.zookeeper;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.Abortable;
+import org.apache.hadoop.hbase.HBaseInMemoryConfiguration;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.zookeeper.KeeperException;
+import org.codehaus.jackson.map.ObjectMapper;
+
+import java.io.IOException;
+import java.io.StringWriter;
+import java.util.Map;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
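+/**
+ * Tracks the cluster configuration znode (<code>clusterConfig</code> under the base znode by
+ * default) and keeps an in-memory {@link HBaseInMemoryConfiguration} overlay in sync with it.
+ * Each configuration key is stored as a child znode whose data is the value; creating, updating
+ * or deleting a child triggers a refresh of the in-memory map, so masters and region servers
+ * pick up configuration changes without a restart.
+ */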
+public class ClusterConfigTracker extends ZooKeeperNodeTracker {
+ public static final Log LOG = LogFactory.getLog(ClusterConfigTracker.class);
+ private Configuration configuration;
+ private HBaseInMemoryConfiguration clusterConfiguration = null;
+
+  /**
+   * Constructs a new cluster configuration tracker.
+   *
+   * After construction, use {@link #start} to kick off tracking.
+   *
+   * @param watcher the ZooKeeper watcher whose clusterConfigZNode is tracked
+   * @param abortable used to abort the process on fatal tracker errors
+   * @param configuration bootstrap configuration wrapped by the in-memory overlay
+   */
+ public ClusterConfigTracker(ZooKeeperWatcher watcher,
+ Abortable abortable, Configuration configuration) {
+ super(watcher, watcher.clusterConfigZNode, abortable);
+ this.configuration = configuration;
+ this.clusterConfiguration = new HBaseInMemoryConfiguration(configuration);
+ }
+
+ @Override
+ public void start() {
+ try {
+ LOG.info("Starting cluster configuration tracker");
+ watcher.registerListener(this);
+ initClusterConfig();
+ } catch (KeeperException e) {
+ LOG.error("ClusterConfigTracker startup failed.", e);
+ abortable.abort("ClusterConfigTracker startup failed", e);
+ } catch (IOException e) {
+ LOG.error("ClusterConfigTracker startup failed.", e);
+ abortable.abort("ClusterConfigTracker startup failed", e);
+ }
+ }
+
+ private boolean isClusterConfigAvailableInZK() throws KeeperException {
+ int checkExists = ZKUtil.checkExists(watcher, watcher.clusterConfigZNode);
+ if (checkExists != -1) {
+ int configNodes = ZKUtil.getNumberOfChildren(watcher, watcher.clusterConfigZNode);
+ byte[] data = ZKUtil.getData(watcher, watcher.clusterConfigZNode);
+ if (data != null && data.length > 0 && configNodes > 0) {
+ // Cluster configuration already exists in ZK.
+ LOG.info("ZK Cluster Config already available. Skipping config ZK creation");
+ return true;
+ }
+ }
+    LOG.info("ZK Cluster Config not available.");
+ return false;
+ }
+
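+  /**
+   * On startup, either seed the config znodes from the local file-based configuration (when no
+   * cluster configuration exists in ZK yet) or load the existing znode data into the in-memory
+   * map; child watches are registered in both cases.
+   */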
+ private void initClusterConfig() throws KeeperException, IOException {
+
+ if (!isClusterConfigAvailableInZK()) {
+ createClusterConfig();
+ ZKUtil.getChildDataAndWatchForNewChildren(watcher, watcher.clusterConfigZNode);
+ } else {
+ clusterConfiguration.refreshClusterConfigMap(
+ ZKUtil.getChildDataAndWatchForNewChildren(watcher, watcher.clusterConfigZNode));
+ }
+ }
+
+ /**
+ * Get the current cluster configuration map.
+ *
+   * @return read-only map of the current cluster configuration keys and values
+   */
+  public Map<String, String> getClusterConfigMap() {
+ return this.clusterConfiguration.getClusterConfigMap();
+ }
+
+ /**
+   * Get the current in-memory cluster configuration.
+   *
+   * @return the in-memory cluster configuration overlay
+ */
+ public HBaseInMemoryConfiguration getClusterConfiguration() {
+ return clusterConfiguration;
+ }
+
+ public void setClusterConfiguration(HBaseInMemoryConfiguration clusterConfiguration) {
+ this.clusterConfiguration = clusterConfiguration;
+ }
+
+  /**
+   * Update an existing HBase cluster configuration key. Creates a new configuration node
+   * if the config does not exist.
+   *
+   * @param configKey configuration key to update
+   * @param configValue new value for the key
+   * @return true if the config znode was updated or created, false on a ZooKeeper error
+   */
+ public boolean updateClusterConfig(String configKey, String configValue) {
+ try {
+ if (ZKUtil.checkExists(watcher,
+          getConfigNodePath(configKey)) != -1) {
+ LOG.debug("Updating cluster configuration. Config Key = " + configKey
+ + " Value = " + configValue);
+ ZKUtil.setData(watcher, getConfigNodePath(configKey), Bytes.toBytes(configValue));
+ } else {
+ LOG.debug("Cluster configuration does not exist for Config Key = "
+ + configKey + " Creating new cluster config node");
+ ZKUtil.createSetData(watcher, getConfigNodePath(configKey),
+ Bytes.toBytes(configValue));
+ }
+ } catch (KeeperException e) {
+ LOG.error("Failure during update cluster configuration", e);
+ return false;
+ }
+ return true;
+ }
+
+ /**
+ * Create a new HBase Cluster configuration key. Updates the configuration if the configuration
+ * node already exists.
+ *
+   * @param configKey configuration key to create
+   * @param configValue value for the new key
+   * @return true if the config znode was created or updated, false on a ZooKeeper error
+ */
+ public boolean createClusterConfig(String configKey, String configValue) {
+ return updateClusterConfig(configKey, configValue);
+ }
+
+ private String getConfigNodePath(String configKey) {
+ return ZKUtil.joinZNode(watcher.clusterConfigZNode, configKey);
+ }
+
+ private void createClusterConfig() throws IOException, KeeperException {
+ LOG.info("Creating Cluster Configuration ZK nodes");
+ JsonConfiguration jsonConfiguration = readClusterConfiguration(configuration);
+ JsonProperty[] jsonProperties = jsonConfiguration.getProperties();
+ if (jsonProperties.length > 0) {
+ LOG.info("Number of cluster configuration properties = " + jsonProperties.length);
+ for (JsonProperty jsonProperty : jsonProperties) {
+ ZKUtil.createSetData(watcher, getZKConfigNodeName(jsonProperty.getKey()),
+ Bytes.toBytes(jsonProperty.getValue()));
+ clusterConfiguration.put(jsonProperty.getKey(), jsonProperty.getValue());
+ }
+ ZKUtil.setData(watcher, watcher.clusterConfigZNode, Bytes.toBytes(jsonProperties.length));
+ } else {
+ LOG.error("Cannot load cluster configuration properties to ZK.");
+ }
+ }
+
+
+ private String getZKConfigNodeName(String key) {
+ return ZKUtil.joinZNode(watcher.clusterConfigZNode, key);
+ }
+
+ private JsonConfiguration readClusterConfiguration(Configuration configuration) throws IOException {
+ StringWriter outWriter = new StringWriter();
+ Configuration.dumpConfiguration(configuration, outWriter);
+ String jsonStr = outWriter.toString();
+ ObjectMapper mapper = new ObjectMapper();
+ return mapper.readValue(jsonStr, JsonConfiguration.class);
+ }
+
+ @Override
+ public void nodeCreated(String path) {
+ if (path.startsWith(watcher.clusterConfigZNode)) {
+ processConfigChange();
+ }
+ }
+
+ @Override
+ public void nodeDeleted(String path) {
+ if (path.startsWith(watcher.clusterConfigZNode)) {
+ processConfigChange();
+ }
+ }
+
+ private void processConfigChange() {
+ try {
+ clusterConfiguration.refreshClusterConfigMap(
+ ZKUtil.getChildDataAndWatchForNewChildren(watcher, watcher.clusterConfigZNode));
+ } catch (KeeperException ke) {
+ LOG.error("Cluster config refresh error ", ke);
+ }
+ }
+
+ @Override
+ public void nodeDataChanged(String path) {
+ if (path.startsWith(watcher.clusterConfigZNode)) {
+ processConfigChange();
+ }
+ }
+
+ @Override
+ public void nodeChildrenChanged(String path) {
+ if (path.startsWith(watcher.clusterConfigZNode)) {
+ processConfigChange();
+ }
+ }
+
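+  /**
+   * POJOs for deserializing the JSON emitted by {@link Configuration#dumpConfiguration}, which
+   * writes a "properties" array of objects with key, value, isFinal and resource fields.
+   */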
+ static class JsonConfiguration {
+ JsonProperty[] properties;
+
+ public JsonProperty[] getProperties() {
+ return properties;
+ }
+
+ public void setProperties(JsonProperty[] properties) {
+ this.properties = properties;
+ }
+ }
+
+ static class JsonProperty {
+ String key;
+ String value;
+ boolean isFinal;
+ String resource;
+
+ public String getKey() {
+ return key;
+ }
+
+ public void setKey(String key) {
+ this.key = key;
+ }
+
+ public String getValue() {
+ return value;
+ }
+
+ public void setValue(String value) {
+ this.value = value;
+ }
+
+ public boolean getIsFinal() {
+ return isFinal;
+ }
+
+ public void setIsFinal(boolean isFinal) {
+ this.isFinal = isFinal;
+ }
+
+ public String getResource() {
+ return resource;
+ }
+
+ public void setResource(String resource) {
+ this.resource = resource;
+ }
+ }
+
+}
+
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java (revision 1382165)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java (working copy)
@@ -87,6 +87,11 @@
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ShutdownRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.StopMasterRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.UnassignRegionRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetConfigRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetConfigResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.UpdateConfigRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.UpdateConfigResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.UnassignRegionRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetClusterStatusRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetSchemaAlterStatusRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetSchemaAlterStatusResponse;
@@ -2095,4 +2100,29 @@
throw new IOException("Unexpected exception when calling master", e);
}
}
+
+
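+  /**
+   * Update (or create) a cluster configuration key via the active master.
+   * @param configKey configuration key to update
+   * @param configValue new value for the key
+   * @return true if the master applied the update
+   * @throws IOException if a remote or network exception occurs
+   */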
+ public boolean updateConfig(final String configKey, final String configValue) throws IOException {
+    return execute(new MasterAdminCallable<Boolean>() {
+ @Override
+ public Boolean call() throws ServiceException {
+ UpdateConfigRequest request = RequestConverter.buildUpdateConfigRequest(configKey, configValue);
+ UpdateConfigResponse response = masterAdmin.updateConfig(null, request);
+ return response.getUpdateConfigResponse();
+ }
+ });
+
+ }
+
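+  /**
+   * Get the active master's current value for a cluster configuration key.
+   * @param configKey configuration key to look up
+   * @return the value as seen by the active master
+   * @throws IOException if a remote or network exception occurs
+   */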
+ public String getConfig(final String configKey) throws IOException {
+    return execute(new MasterAdminCallable<String>() {
+ @Override
+ public String call() throws ServiceException {
+ GetConfigRequest request = RequestConverter.buildGetConfigRequest(configKey);
+ GetConfigResponse response = masterAdmin.getConfig(null, request);
+ return response.getConfigValue();
+ }
+ });
+ }
+
}
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java (revision 1382165)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java (working copy)
@@ -1,4 +1,5 @@
/**
+ * Copyright 2010 The Apache Software Foundation
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
@@ -204,6 +205,8 @@
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperNodeTracker;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
+import org.apache.hadoop.hbase.zookeeper.ClusterConfigTracker;
+import org.apache.hadoop.io.Writable;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.metrics.util.MBeanUtil;
import org.apache.hadoop.net.DNS;
@@ -389,6 +392,9 @@
// Log Splitting Worker
private SplitLogWorker splitLogWorker;
+ private ClusterConfigTracker clusterConfigTracker;
+ private boolean dynamicConfigEnabled = false;
+
// A sleeper that sleeps for msgInterval.
private final Sleeper sleeper;
@@ -451,7 +457,10 @@
public HRegionServer(Configuration conf)
throws IOException, InterruptedException {
this.fsOk = true;
- this.conf = conf;
+ //this.conf = conf;
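+    // The ZooKeeper watcher has to exist before the configuration is resolved, because the
+    // cluster config overlay is read from ZK.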
+ this.zooKeeper = new ZooKeeperWatcher(conf, REGIONSERVER, this);
+ this.conf = getClusterConfiguration(conf);
+
// Set how many times to retry talking to another server over HConnection.
HConnectionManager.setServerSideHConnectionRetries(this.conf, LOG);
this.isOnline = false;
@@ -527,6 +536,22 @@
cacheConfig = new CacheConfig(conf);
}
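+  /**
+   * Returns the passed configuration wrapped in a ClusterConfigTracker-backed in-memory overlay
+   * when hbase.dynamic.configuration.enabled is set; otherwise returns it unchanged.
+   */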
+ private Configuration getClusterConfiguration(Configuration configuration) {
+ dynamicConfigEnabled = configuration.getBoolean(
+ "hbase.dynamic.configuration.enabled", false);
+ if (!dynamicConfigEnabled) {
+      LOG.info("HBase runtime dynamic cluster configuration is disabled. Configuration " +
+          "changes require a config file change and a cluster restart.");
+ return configuration;
+ }
+    LOG.info("HBase runtime dynamic cluster configuration is enabled. Configuration changes" +
+        " can be made through the shell and do not require a cluster restart.");
+ // Initialize cluster config tracker
+ this.clusterConfigTracker = new ClusterConfigTracker(getZooKeeper(), this, configuration);
+ this.clusterConfigTracker.start();
+ return clusterConfigTracker.getClusterConfiguration();
+ }
+
/**
* Run test on configured codecs to make sure supporting libs are in place.
* @param c
@@ -738,8 +763,8 @@
*/
private void initializeZooKeeper() throws IOException, InterruptedException {
// Open connection to zookeeper and set primary watcher
- this.zooKeeper = new ZooKeeperWatcher(conf, REGIONSERVER + ":" +
- this.isa.getPort(), this);
+ //this.zooKeeper = new ZooKeeperWatcher(conf, REGIONSERVER + ":" +
+ // this.isa.getPort(), this);
// Create the master address manager, register with zk, and start it. Then
// block until a master is available. No point in starting up if no master
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java (revision 1382165)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java (working copy)
@@ -25,6 +25,7 @@
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.TableDescriptors;
import org.apache.hadoop.hbase.executor.ExecutorService;
+import org.apache.hadoop.hbase.zookeeper.ClusterConfigTracker;
/**
* Services Master supplies
@@ -77,4 +78,26 @@
* @return true if master enables ServerShutdownHandler;
*/
public boolean isServerShutdownHandlerEnabled();
+
+ /**
+   * Return the current cluster config tracker.
+   * @return the cluster config tracker, or null if dynamic configuration is disabled
+ */
+ public ClusterConfigTracker getClusterConfigTracker();
+
+ /**
+   * Update (or create) a cluster configuration key.
+   * @param configKey configuration key to update
+   * @param configValue new value for the key
+   * @return true if the update succeeded
+ */
+ public boolean updateConfig(String configKey, String configValue);
+
+ /**
+   * Get the current value of a cluster configuration key.
+   * @param configKey configuration key to look up
+   * @return the current value, or null if the key is not set
+ */
+ public String getConfig(String configKey);
+
}
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java (revision 1382165)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java (working copy)
@@ -685,7 +685,6 @@
lastCountChange = now;
}
}
-
LOG.info("Finished waiting for region servers count to settle;" +
" checked in " + count + ", slept for " + slept + " ms," +
" expecting minimum of " + minToStart + ", maximum of "+ maxToStart+","+
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java (revision 1382165)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java (working copy)
@@ -146,6 +146,10 @@
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.StopMasterResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.UnassignRegionRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.UnassignRegionResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.UpdateConfigRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.UpdateConfigResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetConfigResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetConfigRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetClusterStatusRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetClusterStatusResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetSchemaAlterStatusRequest;
@@ -179,6 +183,7 @@
import org.apache.hadoop.hbase.zookeeper.DrainingServerTracker;
import org.apache.hadoop.hbase.zookeeper.RegionServerTracker;
import org.apache.hadoop.hbase.zookeeper.ZKClusterId;
+import org.apache.hadoop.hbase.zookeeper.ClusterConfigTracker;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
import org.apache.hadoop.metrics.util.MBeanUtil;
@@ -190,6 +195,8 @@
import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;
+
+//import org.apache.hadoop.hbase.ipc;
/**
* HMaster is the "master server" for HBase. An HBase cluster has one active
* master. If many masters are started, all compete. Whichever wins goes on to
@@ -258,7 +265,10 @@
private CatalogTracker catalogTracker;
// Cluster status zk tracker and local setter
private ClusterStatusTracker clusterStatusTracker;
-
+ // Cluster config tracker
+ private ClusterConfigTracker clusterConfigTracker = null;
+ private boolean dynamicConfigEnabled = false;
+
// buffer for "fatal error" notices from region servers
// in the cluster. This is only used for assisting
// operations/debugging.
@@ -322,7 +332,9 @@
*/
public HMaster(final Configuration conf)
throws IOException, KeeperException, InterruptedException {
- this.conf = new Configuration(conf);
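+    // Create the ZooKeeper watcher first: resolving the cluster configuration may need to read
+    // the config overlay from ZK.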
+ this.zooKeeper = new ZooKeeperWatcher(conf, MASTER, this, true);
+ this.conf = getClusterConfiguration(conf);
+
// Disable the block cache on the master
this.conf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0.0f);
// Set how many times to retry talking to another server over HConnection.
@@ -382,6 +394,23 @@
this.metrics = new MasterMetrics( new MasterMetricsWrapperImpl(this));
}
+ private Configuration getClusterConfiguration(Configuration configuration) {
+ //dynamicConfigEnabled = true;
+ dynamicConfigEnabled = configuration.getBoolean(
+ "hbase.dynamic.configuration.enabled", false);
+ if (!dynamicConfigEnabled) {
+      LOG.info("HBase runtime dynamic cluster configuration is disabled. Configuration " +
+          "changes require a config file change and a cluster restart.");
+ return configuration;
+ }
+    LOG.info("HBase runtime dynamic cluster configuration is enabled. Configuration changes" +
+        " can be made through the shell and do not require a cluster restart.");
+ // Initialize cluster config tracker
+ this.clusterConfigTracker = new ClusterConfigTracker(getZooKeeper(), this, configuration);
+ this.clusterConfigTracker.start();
+ return clusterConfigTracker.getClusterConfiguration();
+ }
+
/**
* Stall startup if we are designated a backup master; i.e. we want someone
* else to become the master before proceeding.
@@ -786,6 +815,9 @@
boolean rit = this.assignmentManager.
processRegionInTransitionAndBlockUntilAssigned(HRegionInfo.ROOT_REGIONINFO);
ServerName currentRootServer = null;
+ LOG.info("hbase.rpc.engine ======> " + conf.get("hbase.rpc.engine"));
+ //LOG.info("hbase.rpc.engine class ======> " + conf.getClass("hbase.rpc.engine", ProtobufRpcEngine.class));
+
boolean rootRegionLocation = catalogTracker.verifyRootRegionLocation(timeout);
if (!rit && !rootRegionLocation) {
currentRootServer = this.catalogTracker.getRootLocation();
@@ -1226,6 +1258,17 @@
return IsCatalogJanitorEnabledResponse.newBuilder().setValue(isEnabled).build();
}
+ @Override
+  public UpdateConfigResponse updateConfig(RpcController c, UpdateConfigRequest req) throws ServiceException {
+    if (this.clusterConfigTracker == null) {
+      // Dynamic configuration is not enabled, so there is no tracker to push the update to.
+      throw new ServiceException("Dynamic cluster configuration is disabled");
+    }
+    boolean result = this.clusterConfigTracker.updateClusterConfig(req.getConfigKey(), req.getConfigValue());
+    return UpdateConfigResponse.newBuilder().setUpdateConfigResponse(result).build();
+  }
+
+ @Override
+  public GetConfigResponse getConfig(RpcController c, GetConfigRequest req) throws ServiceException {
+    // Protobuf setters reject null, so return an empty string for keys that are not set.
+    String value = conf.get(req.getConfigKey(), "");
+    return GetConfigResponse.newBuilder().setConfigValue(value).build();
+  }
+
/**
* @return Maximum time we should run balancer for
*/
@@ -1893,6 +1936,11 @@
IOException, KeeperException, ExecutionException {
this.zooKeeper.reconnectAfterExpiration();
+ // Re initialize clusterConfigTracker if required.
+ if (dynamicConfigEnabled && clusterConfigTracker != null) {
+ this.clusterConfigTracker = new ClusterConfigTracker(getZooKeeper(), this, conf);
+ this.clusterConfigTracker.start();
+ }
Callable callable = new Callable () {
public Boolean call() throws InterruptedException,
@@ -1976,6 +2024,11 @@
}
@Override
+ public ClusterConfigTracker getClusterConfigTracker() {
+ return this.clusterConfigTracker;
+ }
+
+ @Override
public AssignmentManager getAssignmentManager() {
return this.assignmentManager;
}
@@ -1984,6 +2037,17 @@
return rsFatals;
}
+ @Override
+ public boolean updateConfig(String configKey, String configValue) {
+ return this.clusterConfigTracker.updateClusterConfig(configKey, configValue);
+ }
+
+ @Override
+ public String getConfig(String configKey) {
+ return this.conf.get(configKey);
+ }
+
+
public void shutdown() {
spanReceiverHost.closeReceivers();
if (cpHost != null) {
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/MasterAdminProtocol.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/MasterAdminProtocol.java (revision 1382165)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/MasterAdminProtocol.java (working copy)
@@ -20,6 +20,7 @@
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MasterAdminService;
import org.apache.hadoop.hbase.security.TokenInfo;
import org.apache.hadoop.hbase.security.KerberosInfo;
@@ -61,6 +62,10 @@
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ShutdownResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.StopMasterRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.StopMasterResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetConfigResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.UpdateConfigRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.UpdateConfigResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetConfigRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse;
@@ -310,6 +315,12 @@
public IsMasterRunningResponse isMasterRunning(RpcController c, IsMasterRunningRequest req)
throws ServiceException;
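+
+  /**
+   * Update a cluster configuration key on the active master.
+   * @param c Unused (set to null).
+   * @param req the request containing the configuration key and the new value
+   */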
+ public UpdateConfigResponse updateConfig(RpcController c, UpdateConfigRequest req)
+ throws ServiceException;
+
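+  /**
+   * Get the master's current value for a cluster configuration key.
+   * @param c Unused (set to null).
+   * @param req the request containing the configuration key
+   */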
+ public GetConfigResponse getConfig(RpcController c, GetConfigRequest req)
+ throws ServiceException;
+
/**
* Run a scan of the catalog table
* @param c Unused (set to null).
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/HBaseInMemoryConfiguration.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/HBaseInMemoryConfiguration.java (revision 0)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/HBaseInMemoryConfiguration.java (revision 0)
@@ -0,0 +1,256 @@
+package org.apache.hadoop.hbase;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.zookeeper.ZKUtil;
+import org.apache.hadoop.util.StringUtils;
+
+
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
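+/**
+ * A {@link Configuration} overlay whose getters consult an in-memory map of cluster-level
+ * overrides before falling back to the wrapped file-based configuration. The map is populated
+ * and refreshed by ClusterConfigTracker from the cluster configuration znodes, so lookups such
+ * as {@link #get(String)} and {@link #getBoolean(String, boolean)} reflect runtime configuration
+ * changes without a process restart.
+ */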
+public class HBaseInMemoryConfiguration extends Configuration {
+
+ private static final Log LOG = LogFactory.getLog(HBaseInMemoryConfiguration.class);
+
+  private ConcurrentHashMap<String, String> clusterConfigMap
+      = new ConcurrentHashMap<String, String>(100);
+
+ public HBaseInMemoryConfiguration(Configuration configuration) {
+ super(configuration);
+ }
+
+ @Override
+ public String get(String configKey) {
+ if (clusterConfigMap.containsKey(configKey)) {
+ return clusterConfigMap.get(configKey);
+ }
+ return super.get(configKey);
+ }
+
+ @Override
+ public String getRaw(String name) {
+    return super.getRaw(name);
+ }
+
+ public void put(String configKey, String configValue) {
+ this.clusterConfigMap.put(configKey, configValue);
+ }
+
+ @Override
+ public long getLong(String name, long defaultValue) {
+ if (clusterConfigMap.containsKey(name)) {
+ String longValue = clusterConfigMap.get(name);
+ try {
+ return longValue == null ? defaultValue : Long.parseLong(longValue);
+ } catch (NumberFormatException nfe) {
+ return super.getLong(name, defaultValue);
+ }
+ }
+ return super.getLong(name, defaultValue);
+ }
+
+ @Override
+ public int getInt(String name, int defaultValue) {
+ if (clusterConfigMap.containsKey(name)) {
+ String intValue = clusterConfigMap.get(name);
+ try {
+ return intValue == null ? defaultValue : Integer.parseInt(intValue);
+ } catch (NumberFormatException nfe) {
+ return super.getInt(name, defaultValue);
+ }
+ }
+ return super.getInt(name, defaultValue);
+ }
+
+ @Override
+ public String get(String name, String defaultValue) {
+ if (clusterConfigMap.containsKey(name)) {
+ String stringValue = clusterConfigMap.get(name);
+ return stringValue == null ? defaultValue : stringValue;
+ }
+ return super.get(name, defaultValue);
+ }
+
+ @Override
+ public float getFloat(String name, float defaultValue) {
+ if (clusterConfigMap.containsKey(name)) {
+ String floatValue = clusterConfigMap.get(name);
+ try {
+ return floatValue == null ? defaultValue : Float.parseFloat(floatValue);
+ } catch (NumberFormatException nfe) {
+ return super.getFloat(name, defaultValue);
+ }
+ }
+ return super.getFloat(name, defaultValue);
+ }
+
+ @Override
+ public boolean getBoolean(String name, boolean defaultValue) {
+ if (clusterConfigMap.containsKey(name)) {
+ String booleanValue = clusterConfigMap.get(name);
+ if (booleanValue == null) {
+ return defaultValue;
+ }
+ booleanValue = booleanValue.trim();
+ if ("true".equalsIgnoreCase(booleanValue)) {
+ return true;
+      } else if ("false".equalsIgnoreCase(booleanValue)) {
+ return false;
+ } else {
+ return defaultValue;
+ }
+ }
+ return super.getBoolean(name, defaultValue);
+ }
+
+
+ @Override
+ public void set(String name, String value) {
+ super.set(name, value);
+ clusterConfigMap.put(name, value);
+ }
+
+ @Override
+ public void setIfUnset(String name, String value) {
+ if (clusterConfigMap.get(name) == null) {
+ set(name, value);
+ }
+ }
+
+ @Override
+ public void setInt(String name, int value) {
+ set(name, Integer.toString(value));
+ }
+
+ @Override
+ public void setLong(String name, long value) {
+ set(name, Long.toString(value));
+ }
+
+ @Override
+ public void setFloat(String name, float value) {
+ set(name, Float.toString(value));
+ }
+
+ @Override
+ public void setBoolean(String name, boolean value) {
+ set(name, Boolean.toString(value));
+ }
+
+ @Override
+ public void setBooleanIfUnset(String name, boolean value) {
+ setIfUnset(name, Boolean.toString(value));
+ }
+
+ @Override
+  public <T extends Enum<T>> void setEnum(String name, T value) {
+ set(name, value.toString());
+ }
+
+ @Override
+  public <T extends Enum<T>> T getEnum(String name, T defaultValue) {
+ return super.getEnum(name, defaultValue);
+ }
+
+ @Override
+ public IntegerRanges getRange(String name, String defaultValue) {
+ return super.getRange(name, defaultValue);
+ }
+
+ @Override
+  public Collection<String> getStringCollection(String name) {
+ return super.getStringCollection(name);
+ }
+
+ @Override
+ public String[] getStrings(String name) {
+ return super.getStrings(name);
+ }
+
+ @Override
+ public String[] getStrings(String name, String... defaultValue) {
+ return super.getStrings(name, defaultValue);
+ }
+
+ @Override
+ public void setStrings(String name, String... values) {
+ set(name, StringUtils.arrayToString(values));
+ }
+
+ @Override
+  public Class<?> getClassByName(String name) throws ClassNotFoundException {
+ return super.getClassByName(name);
+ }
+
+ @Override
+  public Class<?>[] getClasses(String name, Class<?>... defaultValue) {
+ return super.getClasses(name, defaultValue);
+ }
+
+ @Override
+  public Class<?> getClass(String name, Class<?> defaultValue) {
+ return super.getClass(name, defaultValue);
+ }
+
+ @Override
+  public <U> Class<? extends U> getClass(String name, Class<? extends U> defaultValue, Class<U> xface) {
+ return super.getClass(name, defaultValue, xface);
+ }
+
+ @Override
+  public void setClass(String name, Class<?> theClass, Class<?> xface) {
+ super.setClass(name, theClass, xface);
+ clusterConfigMap.put(name, theClass.getName());
+ }
+
+ /**
+ * Rebuild the cluster configuration map from ZK node data.
+   * @param clusterData config znode names and data fetched from ZooKeeper
+   */
+  public void refreshClusterConfigMap(List<ZKUtil.NodeAndData> clusterData) {
+ if (clusterData != null) {
+ LOG.debug("Found Cluster configuration in ZK. Refreshing the in memory configuration map from ZK");
+ for (ZKUtil.NodeAndData nodeData : clusterData) {
+ String configNodePath = nodeData.getNode();
+ String configKey = configNodePath.substring(configNodePath.lastIndexOf("/") + 1,
+ configNodePath.length());
+ clusterConfigMap.put(configKey, Bytes.toString(nodeData.getData()));
+ }
+ LOG.debug("Refreshed cluster configuration map. Current size = "
+ + clusterConfigMap.size());
+ }
+ }
+
+ /**
+   * Get a read-only view of the current configuration map.
+   *
+   * @return an unmodifiable view of the current cluster configuration map
+   */
+  public Map<String, String> getClusterConfigMap() {
+ return Collections.unmodifiableMap(clusterConfigMap);
+ }
+}
Index: hbase-server/src/main/ruby/hbase/admin.rb
===================================================================
--- hbase-server/src/main/ruby/hbase/admin.rb (revision 1382165)
+++ hbase-server/src/main/ruby/hbase/admin.rb (working copy)
@@ -113,6 +113,20 @@
end
#----------------------------------------------------------------------------------------------
+ # Update a cluster configuration
+  # Returns true if the configuration update was successful.
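+  # Example (illustrative key): update_config 'hbase.balancer.period', '300000'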
+ def update_config(configKey, configValue)
+ @admin.updateConfig(configKey, configValue)
+ end
+
+ #----------------------------------------------------------------------------------------------
+ # Get current cluster configuration for a key
+ # Returns value of cluster config key.
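+  # Example (illustrative key): get_config 'hbase.balancer.period'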
+ def get_config(configKey)
+ @admin.getConfig(configKey)
+ end
+
+ #----------------------------------------------------------------------------------------------
# Enables a table
def enable(table_name)
tableExists(table_name)
Index: hbase-server/src/main/ruby/shell.rb
===================================================================
--- hbase-server/src/main/ruby/shell.rb (revision 1382165)
+++ hbase-server/src/main/ruby/shell.rb (working copy)
@@ -289,6 +289,8 @@
catalogjanitor_run
catalogjanitor_switch
catalogjanitor_enabled
+ update_config
+ get_config
]
)
Index: hbase-server/src/main/resources/hbase-default.xml
===================================================================
--- hbase-server/src/main/resources/hbase-default.xml (revision 1382165)
+++ hbase-server/src/main/resources/hbase-default.xml (working copy)
@@ -859,6 +859,7 @@
    files when hbase.data.umask.enable is true</description>
  </property>
+
  <property>
    <name>hbase.metrics.showTableName</name>
    <value>true</value>
@@ -869,6 +870,16 @@
+  <property>
+    <name>hbase.dynamic.configuration.enabled</name>
+    <value>false</value>
+    <description>
+    Enables the dynamic cluster configuration ability. If enabled, you can make
+    runtime cluster configuration changes without the need for restarting the cluster.
+    </description>
+  </property>
+
+
  <property>
    <name>hbase.metrics.exposeOperationTimes</name>
    <value>true</value>
    <description>Whether to report metrics about time taken performing an
Index: hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
===================================================================
--- hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java (revision 1382165)
+++ hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java (working copy)
@@ -600,7 +600,7 @@
"hbase.regionserver.hostname.seen.by.master";
public static final String HBASE_MASTER_LOGCLEANER_PLUGINS =
- "hbase.master.logcleaner.plugins";
+ "hbase.master.logcleaner.plugins";
public static final String HBASE_REGION_SPLIT_POLICY_KEY =
"hbase.regionserver.region.split.policy";