Index: src/test/java/org/apache/hadoop/hbase/master/TestExcludeList.java
===================================================================
--- src/test/java/org/apache/hadoop/hbase/master/TestExcludeList.java (revision 0)
+++ src/test/java/org/apache/hadoop/hbase/master/TestExcludeList.java (revision 0)
@@ -0,0 +1,113 @@
+/**
+ * Copyright 2011 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master;
+
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import java.io.BufferedWriter;
+import java.io.FileWriter;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HServerInfo;
+import org.apache.hadoop.hbase.MiniHBaseCluster;
+import org.apache.hadoop.hbase.util.JVMClusterUtil;
+import org.junit.Test;
+
+public class TestExcludeList {
+ private static final Log LOG = LogFactory.getLog(TestExcludeList.class);
+
+ /**
+ * Simple test of exclude file list.
+ *
+   * This test starts a region server, adds it to the excludes file,
+   * asks the master to refresh its node lists, and verifies that the
+   * region server has actually been kicked out.
+ *
+ * @throws Exception
+ */
+  @Test(timeout = 180000)
+ public void testSimpleExclude() throws Exception {
+
+ final int NUM_MASTERS = 1;
+ final int NUM_RS = 3;
+
+    // Create the config to use for this cluster
+    Configuration conf = HBaseConfiguration.create();
+    String exFilePath =
+      HBaseTestingUtility.getTestDir().toString() + Path.SEPARATOR + "decom";
+
+    // Point the master at the exclude (decommission) file
+    conf.set("hbase.hosts.decom", exFilePath);
+
+ // Start the cluster
+ HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(conf);
+ TEST_UTIL.startMiniCluster(NUM_MASTERS, NUM_RS);
+ MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
+
+ // start a new region server
+ JVMClusterUtil.RegionServerThread rservThread = cluster.startRegionServer();
+
+ HServerInfo info = rservThread.getRegionServer().getHServerInfo();
+
+ // make sure the new server is up and running
+ assertTrue(cluster.getMaster().getServerManager().getOnlineServersList().contains(info));
+
+    // Write this region server's info to the excludes file. Let any
+    // IOException propagate and fail the test rather than swallowing it.
+    BufferedWriter out = new BufferedWriter(new FileWriter(exFilePath));
+    out.write(info.getHostnamePort());
+    out.close();
+
+ // Ask master to refresh its exclude list. This should expire the server
+ cluster.getMaster().refreshNodes();
+
+    // Verify the region server is indeed offline. This happens only after
+    // its regions have been moved off, which takes a variable amount of
+    // time, so poll instead of sleeping for one fixed interval; give up
+    // after 6 seconds.
+    for (int i = 0; i < 3; i++) {
+      Thread.sleep(2000);
+      if (!cluster.getMaster().getServerManager().getOnlineServersList().contains(info)) {
+        break;
+      }
+    }
+
+    assertFalse(cluster.getMaster().getServerManager().getOnlineServersList().contains(info));
+
+ }
+}
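
For reference, the exclude file read through Hadoop's org.apache.hadoop.util.HostsFileReader (wired up in the ServerManager changes below) is a plain text file with one entry per line, either a bare hostname or a hostname:port pair; the test above writes a single hostname:port line. A minimal sketch of producing such a file by hand (the path and host names here are placeholders, not part of this patch):

    import java.io.BufferedWriter;
    import java.io.FileWriter;
    import java.io.IOException;

    public class WriteDecomFile {
      public static void main(String[] args) throws IOException {
        // One entry per line; HostsFileReader accepts either form.
        BufferedWriter out = new BufferedWriter(new FileWriter("/tmp/decom"));
        out.write("rs1.example.com:60020");  // hostname:port form
        out.newLine();
        out.write("rs2.example.com");        // bare hostname form
        out.newLine();
        out.close();
      }
    }
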
Index: src/main/java/org/apache/hadoop/hbase/NodeDecommissionedException.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/NodeDecommissionedException.java (revision 0)
+++ src/main/java/org/apache/hadoop/hbase/NodeDecommissionedException.java (revision 0)
@@ -0,0 +1,33 @@
+/**
+ * Copyright 2011 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase;
+
+import java.io.IOException;
+
+/**
+ * Thrown when a region server that is absent from the include list, or
+ * present in the exclude list, tries to report for duty.
+ */
+public class NodeDecommissionedException extends IOException {
+  private static final long serialVersionUID = 1L;
+
+  public NodeDecommissionedException(String message) {
+    super(message);
+  }
+}
Index: src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java (revision 14197)
+++ src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java (working copy)
@@ -117,6 +117,7 @@
import org.apache.hadoop.hbase.regionserver.wal.WALObserver;
import org.apache.hadoop.hbase.replication.regionserver.Replication;
import org.apache.hadoop.hbase.security.User;
+import org.apache.hadoop.hbase.NodeDecommissionedException;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.CompressionTest;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
@@ -622,7 +623,7 @@
if (e instanceof IOException) {
e = RemoteExceptionHandler.checkIOException((IOException) e);
}
- if (e instanceof YouAreDeadException) {
+ if (e instanceof YouAreDeadException || e instanceof NodeDecommissionedException) {
// This will be caught and handled as a fatal error below
throw e;
}
@@ -1581,6 +1582,12 @@
ioe);
// Re-throw IOE will cause RS to abort
throw ioe;
+      } else if (ioe instanceof NodeDecommissionedException) {
+        LOG.fatal("Master rejected startup because this node is in the decommissioned list",
+          ioe);
+        // Re-throwing the IOE will cause the RS to abort
+        throw ioe;
} else {
LOG.warn("remote error telling master we are up", e);
}
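
The two hunks above make the region server treat a decommission rejection the same way as YouAreDeadException: the exception is rethrown out of the retry path so the server aborts rather than retrying its report-for-duty forever. A standalone restatement of that classification (illustrative only, not code from the patch):

    import org.apache.hadoop.hbase.NodeDecommissionedException;
    import org.apache.hadoop.hbase.YouAreDeadException;

    public class FatalMasterResponse {
      // Mirrors the checks added above: both exceptions are fatal to the RS.
      static boolean isFatal(Throwable t) {
        return t instanceof YouAreDeadException
            || t instanceof NodeDecommissionedException;
      }
    }
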
Index: src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/master/ServerManager.java (revision 14197)
+++ src/main/java/org/apache/hadoop/hbase/master/ServerManager.java (working copy)
@@ -23,6 +23,8 @@
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
@@ -37,6 +39,7 @@
import org.apache.hadoop.hbase.HServerAddress;
import org.apache.hadoop.hbase.HServerInfo;
import org.apache.hadoop.hbase.HServerLoad;
+import org.apache.hadoop.hbase.NodeDecommissionedException;
import org.apache.hadoop.hbase.PleaseHoldException;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.YouAreDeadException;
@@ -49,6 +52,7 @@
import org.apache.hadoop.hbase.master.handler.ServerShutdownHandler;
import org.apache.hadoop.hbase.master.metrics.MasterMetrics;
import org.apache.hadoop.hbase.regionserver.Leases.LeaseStillHeldException;
+import org.apache.hadoop.util.HostsFileReader;
/**
* The ServerManager class manages info about region servers - HServerInfo,
@@ -74,6 +78,9 @@
private final Map<String, HServerInfo> onlineServers =
new ConcurrentHashMap<String, HServerInfo>();
+  private final Set<String> decommissionedServers =
+    new HashSet<String>();
+
// TODO: This is strange to have two maps but HSI above is used on both sides
/**
* Map from full server-instance name to the RPC connection for this server.
@@ -91,19 +98,28 @@
private final long maxSkew;
+  private static final String HOSTS_PROP_NAME = "hbase.hosts";
+  private static final String DECOMMISSIONED_HOSTS_PROP_NAME = "hbase.hosts.decom";
+
+ private HostsFileReader hostsReader;
+
/**
* Constructor.
* @param master
* @param services
* @param metrics
+ * @throws IOException
*/
public ServerManager(final Server master, final MasterServices services,
- MasterMetrics metrics) {
+ MasterMetrics metrics) throws IOException {
this.master = master;
this.services = services;
this.metrics = metrics;
Configuration c = master.getConfiguration();
maxSkew = c.getLong("hbase.master.maxclockskew", 30000);
+ this.hostsReader = new HostsFileReader(
+ c.get(HOSTS_PROP_NAME, ""),
+ c.get(DECOMMISSIONED_HOSTS_PROP_NAME, ""));
this.deadservers =
new DeadServer(c.getInt("hbase.master.maxdeadservers", 100));
}
@@ -124,13 +140,61 @@
// in, it should have been removed from serverAddressToServerInfo and queued
// for processing by ProcessServerShutdown.
HServerInfo info = new HServerInfo(serverInfo);
+    if (!checkHostsLists(info)) {
+      throw new NodeDecommissionedException("Server " + info.getServerName()
+        + " is not allowed to join the cluster: it is absent from the hosts"
+        + " list or present in the exclude list");
+    }
checkIsDead(info.getServerName(), "STARTUP");
checkAlreadySameHostPort(info);
checkClockSkew(info, serverCurrentTime);
recordNewServer(info, false, null);
}
+  /**
+   * Checks whether a region server is allowed to connect to the master:
+   * it must be in the hosts list (an empty list admits everyone) and must
+   * not be in the decommission list.
+   * @param info The info of the server to check.
+   * @return true if the server may join the cluster.
+   */
+  public boolean checkHostsLists(HServerInfo info) {
+    Set<String> hostsList = hostsReader.getHosts();
+    boolean inHostsList =
+      (hostsList.isEmpty() ||
+       hostsList.contains(info.getHostname()) ||
+       hostsList.contains(info.getHostnamePort()));
+
+    Set<String> decommissionList = hostsReader.getExcludedHosts();
+ boolean inDecommissionedHostsList =
+ (decommissionList.contains(info.getHostname()) ||
+ decommissionList.contains(info.getHostnamePort()));
+
+ return (inHostsList && !inDecommissionedHostsList);
+ }
+
/**
+   * Rereads the config to get the hosts and decommission list file names,
+   * then rereads those files to update the internal lists. Each online host
+   * is checked for a change of state:
+   * 1. Added to hosts --> no further work needed here.
+   * 2. Removed from hosts --> expire the server and fail over its regions.
+   * 3. Added to decommission --> move the regions off the server and expire it.
+   * 4. Removed from decommission --> allow the region server to come back in again.
+   */
+ public void refreshNodes() throws IOException {
+    // Reread the config to get the hbase.hosts and hbase.hosts.decom file
+    // names, then refresh the internal includes and excludes lists.
+    Configuration conf = this.master.getConfiguration();
+    hostsReader.updateFileNames(conf.get(HOSTS_PROP_NAME, ""),
+      conf.get(DECOMMISSIONED_HOSTS_PROP_NAME, ""));
+    hostsReader.refresh();
+    for (HServerInfo serverInfo : this.onlineServers.values()) {
+ // Check if the server is allowed.
+ if (!checkHostsLists(serverInfo)) {
+ // this server has been disallowed - kick it out.
+ decommissionServer(serverInfo);
+ }
+ }
+ }
+
+ /**
* Test to see if we have a server of same host and port already.
* @param serverInfo
* @throws PleaseHoldException
@@ -458,12 +522,15 @@
}
}
- /*
- * Expire the passed server. Add it to list of deadservers and queue a
- * shutdown processing.
+ /**
+ * Marks a server as expired.
+   * CAUTION: call this only in conjunction with submitShutdownHandler;
+   * otherwise the regions will not fail over. The only caller outside this
+   * class should be ServerShutdownHandler, which lives in another package,
+   * which is the only reason this method is public rather than protected.
*/
- public synchronized void expireServer(final HServerInfo hsi) {
- // First check a server to expire. ServerName is of the form:
+ public synchronized void markServerExpiry(final HServerInfo hsi) {
+ // First check a server to expire. ServerName is of the form:
// <hostname> , <port> , <startcode>
String serverName = hsi.getServerName();
HServerInfo info = this.onlineServers.get(serverName);
@@ -474,8 +541,8 @@
}
if (this.deadservers.contains(serverName)) {
// TODO: Can this happen? It shouldn't be online in this case?
- LOG.warn("Received expiration of " + hsi.getServerName() +
- " but server shutdown is already in progress");
+ LOG.warn("Received expiration of " + hsi.getServerName() +
+ " but server shutdown is already in progress");
return;
}
// Remove the server from the known servers lists and update load info BUT
@@ -484,16 +551,17 @@
this.deadservers.add(serverName);
this.onlineServers.remove(serverName);
this.serverConnections.remove(serverName);
- // If cluster is going down, yes, servers are going to be expiring; don't
- // process as a dead server
- if (this.clusterShutdown) {
- LOG.info("Cluster shutdown set; " + hsi.getServerName() +
- " expired; onlineServers=" + this.onlineServers.size());
- if (this.onlineServers.isEmpty()) {
- master.stop("Cluster shutdown set; onlineServer=0");
- }
- return;
+ if (this.decommissionedServers.contains(serverName)) {
+ this.decommissionedServers.remove(serverName);
}
+
+ LOG.debug("Added=" + serverName + " to dead servers");
+ }
+
+ /**
+ * Create and submit the handler for handling shutdown of a node.
+ */
+ private synchronized void submitShutdownHandler(final HServerInfo hsi, boolean decommission) {
CatalogTracker ct = this.master.getCatalogTracker();
// Was this server carrying root?
boolean carryingRoot;
@@ -510,22 +578,65 @@
// may have reset the meta location as null already (it may have already
// run into fact that meta is dead). I can ask assignment manager. It
// has an inmemory list of who has what. This list will be cleared as we
- // process the dead server but should be find asking it now.
+ // process the dead server but should be fine asking it now.
HServerAddress address = ct.getMetaLocation();
boolean carryingMeta =
address != null && hsi.getServerAddress().equals(address);
if (carryingRoot || carryingMeta) {
this.services.getExecutorService().submit(new MetaServerShutdownHandler(this.master,
- this.services, this.deadservers, info, carryingRoot, carryingMeta));
+ this.services, this.deadservers, hsi, decommission, carryingRoot, carryingMeta));
} else {
this.services.getExecutorService().submit(new ServerShutdownHandler(this.master,
- this.services, this.deadservers, info));
+ this.services, this.deadservers, hsi, decommission));
}
- LOG.debug("Added=" + serverName +
- " to dead servers, submitted shutdown handler to be executed, root=" +
+
+ LOG.debug(hsi.getServerName()
+ + ": submitted shutdown handler to be executed, root=" +
carryingRoot + ", meta=" + carryingMeta);
}
+ /*
+   * Expire the passed server: add it to the list of dead servers and queue
+   * shutdown processing.
+ */
+  public synchronized void expireServer(final HServerInfo hsi) {
+    markServerExpiry(hsi);
+
+ // If cluster is going down, yes, servers are going to be expiring; don't
+ // process as a dead server
+ if (this.clusterShutdown) {
+ LOG.info("Cluster shutdown set; " + hsi.getServerName()
+ + " expired; onlineServers=" + this.onlineServers.size());
+ if (this.onlineServers.isEmpty()) {
+ master.stop("Cluster shutdown set; onlineServer=0");
+ }
+ return;
+ }
+
+    submitShutdownHandler(hsi, false);
+  }
+
+ /*
+ * Decommission the passed server. This will first move all the regions off
+ * this server and then mark it for expiry.
+ */
+ private synchronized void decommissionServer(final HServerInfo hsi) {
+ String serverName = hsi.getServerName();
+ if (this.decommissionedServers.contains(serverName)) {
+      LOG.debug(serverName
+        + " is already in the process of being decommissioned. Ignoring.");
+      return;
+    } else {
+      // Add it to the decommissioned servers list so no regions get placed here
+      this.decommissionedServers.add(serverName);
+      LOG.debug("Added=" + serverName + " to decommissioned servers.");
+ }
+
+ submitShutdownHandler(hsi, true);
+ }
+
// RPC methods to region servers
/**
@@ -673,6 +784,25 @@
return new ArrayList<HServerInfo>(onlineServers.values());
}
+ /**
+ * @return A copy of the internal list of servers that can be assigned regions.
+ * This essentially removes the decommissioned servers from the online list.
+ */
+  public List<HServerInfo> getAssignableServersList() {
+    List<HServerInfo> assignableServerList =
+      new ArrayList<HServerInfo>(onlineServers.values());
+
+    // Remove the decommissioned servers. Most of the time this list should
+    // be empty, since decommissioned servers are taken offline soon after
+    // the load is taken off them.
+    for (String decomName : decommissionedServers) {
+      assignableServerList.remove(onlineServers.get(decomName));
+    }
+
+ return assignableServerList;
+ }
+
public boolean isServerOnline(String serverName) {
return onlineServers.containsKey(serverName);
}
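
The admission rule implemented by checkHostsLists() deserves a precise statement: an empty include list admits every server, the exclude list always wins, and both lists are matched against the bare hostname as well as the hostname:port form. A self-contained restatement with a tiny usage example (illustrative only; the host names are placeholders):

    import java.util.HashSet;
    import java.util.Set;

    public class HostsListCheck {
      // Mirrors ServerManager.checkHostsLists(): allowed when included
      // (an empty include list means "allow all") and not excluded.
      static boolean isAllowed(Set<String> includes, Set<String> excludes,
          String hostname, String hostnamePort) {
        boolean included = includes.isEmpty()
            || includes.contains(hostname)
            || includes.contains(hostnamePort);
        boolean excluded = excludes.contains(hostname)
            || excludes.contains(hostnamePort);
        return included && !excluded;
      }

      public static void main(String[] args) {
        Set<String> includes = new HashSet<String>();
        Set<String> excludes = new HashSet<String>();
        excludes.add("rs1.example.com:60020");
        // Excluded by hostname:port, despite the empty (allow-all) include list.
        System.out.println(isAllowed(includes, excludes,
            "rs1.example.com", "rs1.example.com:60020"));  // false
        System.out.println(isAllowed(includes, excludes,
            "rs2.example.com", "rs2.example.com:60020"));  // true
      }
    }
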
Index: src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java (revision 14197)
+++ src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java (working copy)
@@ -994,7 +994,7 @@
final HServerInfo serverToExclude, final boolean forceNewPlan) {
// Pickup existing plan or make a new one
String encodedName = state.getRegion().getEncodedName();
-    List<HServerInfo> servers = this.serverManager.getOnlineServersList();
+    List<HServerInfo> servers = this.serverManager.getAssignableServersList();
// The remove below hinges on the fact that the call to
-    // serverManager.getOnlineServersList() returns a copy
+    // serverManager.getAssignableServersList() returns a copy
if (serverToExclude != null) servers.remove(serverToExclude);
@@ -1215,7 +1215,7 @@
*/
public void assignAllUserRegions() throws IOException, InterruptedException {
// Get all available servers
-    List<HServerInfo> servers = serverManager.getOnlineServersList();
+    List<HServerInfo> servers = serverManager.getAssignableServersList();
// Scan META for all user regions, skipping any disabled tables
Map<HRegionInfo, HServerAddress> allRegions =
Index: src/main/java/org/apache/hadoop/hbase/master/HMaster.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/master/HMaster.java (revision 14197)
+++ src/main/java/org/apache/hadoop/hbase/master/HMaster.java (working copy)
@@ -1048,6 +1048,17 @@
return ProtocolSignature.getProtocolSigature(
this, protocol, clientVersion, clientMethodsHash);
}
+
+ /**
+   * Refresh the node list files. This rereads both the includes and the
+   * excludes files and decommissions any region server that is no longer
+   * allowed to stay in the cluster.
+   * @throws IOException
+   */
+  @Override
+  public void refreshNodes() throws IOException {
+    this.getServerManager().refreshNodes();
+  }
/**
* Utility for constructing an instance of the passed HMaster class.
Index: src/main/java/org/apache/hadoop/hbase/master/handler/ServerShutdownHandler.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/master/handler/ServerShutdownHandler.java (revision 14197)
+++ src/main/java/org/apache/hadoop/hbase/master/handler/ServerShutdownHandler.java (working copy)
@@ -56,20 +56,24 @@
private final Server server;
private final MasterServices services;
private final DeadServer deadServers;
+ private final boolean decommission;
public ServerShutdownHandler(final Server server, final MasterServices services,
- final DeadServer deadServers, final HServerInfo hsi) {
- this(server, services, deadServers, hsi, EventType.M_SERVER_SHUTDOWN);
+ final DeadServer deadServers, final HServerInfo hsi, final boolean decommission) {
+ this(server, services, deadServers, hsi, decommission, EventType.M_SERVER_SHUTDOWN);
}
ServerShutdownHandler(final Server server, final MasterServices services,
- final DeadServer deadServers, final HServerInfo hsi, EventType type) {
+      final DeadServer deadServers, final HServerInfo hsi,
+      final boolean decommission, EventType type) {
super(server, type);
this.hsi = hsi;
this.server = server;
this.services = services;
this.deadServers = deadServers;
- if (!this.deadServers.contains(hsi.getServerName())) {
+ this.decommission = decommission;
+
+ // The server should either be in the decommissioning process or dead
+ if (!decommission && !this.deadServers.contains(hsi.getServerName())) {
LOG.warn(hsi.getServerName() + " is NOT in deadservers; it should be!");
}
}
@@ -153,8 +157,16 @@
this.services.getAssignmentManager().assign(e.getKey(), true);
}
}
+
+    // All the reassigning has been successfully done. If this was a
+    // decommission, mark the decommissioned node for expiry; this causes the
+    // node to shut itself down and prevents it from joining back again.
+ if (this.decommission) {
+ this.services.getServerManager().markServerExpiry(this.hsi);
+ }
+
this.deadServers.finish(serverName);
- LOG.info("Finished processing of shutdown of " + serverName);
+ LOG.info("Finished processing of "+ (this.decommission?"decommission":"shutdown") +" of " + serverName);
}
/**
Index: src/main/java/org/apache/hadoop/hbase/master/handler/MetaServerShutdownHandler.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/master/handler/MetaServerShutdownHandler.java (revision 14197)
+++ src/main/java/org/apache/hadoop/hbase/master/handler/MetaServerShutdownHandler.java (working copy)
@@ -34,9 +34,12 @@
public MetaServerShutdownHandler(final Server server,
final MasterServices services,
- final DeadServer deadServers, final HServerInfo hsi,
- final boolean carryingRoot, final boolean carryingMeta) {
- super(server, services, deadServers, hsi, EventType.M_META_SERVER_SHUTDOWN);
+ final DeadServer deadServers,
+ final HServerInfo hsi,
+ final boolean decommission,
+ final boolean carryingRoot,
+ final boolean carryingMeta) {
+ super(server, services, deadServers, hsi, decommission, EventType.M_META_SERVER_SHUTDOWN);
this.carryingRoot = carryingRoot;
this.carryingMeta = carryingMeta;
}
Index: src/main/java/org/apache/hadoop/hbase/ipc/HMasterInterface.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/ipc/HMasterInterface.java (revision 14197)
+++ src/main/java/org/apache/hadoop/hbase/ipc/HMasterInterface.java (working copy)
@@ -187,4 +187,11 @@
* @return Previous balancer value
*/
public boolean balanceSwitch(final boolean b);
+
+ /**
+   * Refresh the node list files. This rereads both the includes and the
+   * excludes files.
+ * @throws IOException
+ */
+ public void refreshNodes() throws IOException;
}
\ No newline at end of file
Index: src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java (revision 14197)
+++ src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java (working copy)
@@ -991,6 +991,16 @@
}
/**
+   * Refresh the node list files. This rereads both the includes and the
+   * excludes files.
+ * @throws IOException
+ */
+  public void refreshNodes() throws IOException {
+ getMaster().refreshNodes();
+ }
+
+ /**
* Turn the load balancer on or off.
* @param b If true, enable balancer. If false, disable balancer.
* @return Previous balancer value
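
With the HBaseAdmin method in place, a client can drive a decommission end to end: append the server to the exclude file named by hbase.hosts.decom in the master's configuration, then call refreshNodes(). A hedged sketch of that sequence (the file path and host name are placeholders; the file must be the one the master actually reads):

    import java.io.BufferedWriter;
    import java.io.FileWriter;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.HBaseAdmin;

    public class DecommissionNode {
      public static void main(String[] args) throws Exception {
        // 1. Append the victim to the exclude file the master reads.
        BufferedWriter out =
            new BufferedWriter(new FileWriter("/etc/hbase/decom", true));
        out.write("rs1.example.com:60020");
        out.newLine();
        out.close();

        // 2. Ask the master to reread its include/exclude files; this moves
        //    the regions off the server and then expires it.
        Configuration conf = HBaseConfiguration.create();
        HBaseAdmin admin = new HBaseAdmin(conf);
        admin.refreshNodes();
      }
    }

The same effect is available from the shell via the refresh_nodes command added below.
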
Index: src/main/ruby/hbase/admin.rb
===================================================================
--- src/main/ruby/hbase/admin.rb (revision 14197)
+++ src/main/ruby/hbase/admin.rb (working copy)
@@ -193,6 +193,12 @@
end
#----------------------------------------------------------------------------------------------
+  # Refresh the node list (includes and excludes) files
+ def refreshNodes()
+ @admin.refreshNodes()
+ end
+
+ #----------------------------------------------------------------------------------------------
# Returns table's structure description
def describe(table_name)
tables = @admin.listTables.to_a
Index: src/main/ruby/shell/commands/refresh_nodes.rb
===================================================================
--- src/main/ruby/shell/commands/refresh_nodes.rb (revision 0)
+++ src/main/ruby/shell/commands/refresh_nodes.rb (revision 0)
@@ -0,0 +1,40 @@
+#
+# Copyright 2011 The Apache Software Foundation
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+module Shell
+ module Commands
+ class RefreshNodes < Command
+ def help
+ return <<-EOF
+Refresh the node list files. This command rereads both the includes and the
+exclude (decommission) files. Use with caution. For expert use only. Examples:
+
+ hbase> refresh_nodes
+EOF
+ end
+
+ def command()
+ format_simple_command do
+ admin.refreshNodes()
+ end
+ end
+ end
+ end
+end
Index: src/main/ruby/shell.rb
===================================================================
--- src/main/ruby/shell.rb (revision 14197)
+++ src/main/ruby/shell.rb (working copy)
@@ -256,6 +256,7 @@
flush
major_compact
move
+ refresh_nodes
split
unassign
zk_dump