diff --git a/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java b/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java index 789cad4..f863e24 100644 --- a/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java +++ b/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java @@ -48,10 +48,11 @@ import org.apache.hadoop.io.VersionedWritable; * */ public class ClusterStatus extends VersionedWritable { + // TODO: The pre-region 'load' is temporarily broken. To be fixed. private static final byte VERSION = 0; private String hbaseVersion; - private Collection liveServerInfo; + private Collection servers; private Collection deadServers; private Map intransition; @@ -72,8 +73,8 @@ public class ClusterStatus extends VersionedWritable { /** * @return the number of region servers in the cluster */ - public int getServers() { - return liveServerInfo.size(); + public int getServersSize() { + return servers.size(); } /** @@ -88,10 +89,8 @@ public class ClusterStatus extends VersionedWritable { */ public double getAverageLoad() { int load = 0; - for (HServerInfo server: liveServerInfo) { - load += server.getLoad().getLoad(); - } - return (double)load / (double)liveServerInfo.size(); + // TODO: Fix. + return (double)load / (double)getServersSize(); } /** @@ -99,9 +98,7 @@ public class ClusterStatus extends VersionedWritable { */ public int getRegionsCount() { int count = 0; - for (HServerInfo server: liveServerInfo) { - count += server.getLoad().getNumberOfRegions(); - } + // TODO: Fix. return count; } @@ -110,9 +107,7 @@ public class ClusterStatus extends VersionedWritable { */ public int getRequestsCount() { int count = 0; - for (HServerInfo server: liveServerInfo) { - count += server.getLoad().getNumberOfRequests(); - } + // TODO: Fix. 
return count; } @@ -142,7 +137,7 @@ public class ClusterStatus extends VersionedWritable { } return (getVersion() == ((ClusterStatus)o).getVersion()) && getHBaseVersion().equals(((ClusterStatus)o).getHBaseVersion()) && - liveServerInfo.equals(((ClusterStatus)o).liveServerInfo) && + this.servers.equals(((ClusterStatus)o).servers) && deadServers.equals(((ClusterStatus)o).deadServers); } @@ -150,7 +145,7 @@ public class ClusterStatus extends VersionedWritable { * @see java.lang.Object#hashCode() */ public int hashCode() { - return VERSION + hbaseVersion.hashCode() + liveServerInfo.hashCode() + + return VERSION + hbaseVersion.hashCode() + this.servers.hashCode() + deadServers.hashCode(); } @@ -165,21 +160,24 @@ public class ClusterStatus extends VersionedWritable { /** * Returns detailed region server information: A list of - * {@link HServerInfo}, containing server load and resource usage - * statistics as {@link HServerLoad}, containing per-region - * statistics as {@link HServerLoad.RegionLoad}. + * {@link ServerName}. 
* @return region server information + * @deprecated Use {@link #getServers()} */ - public Collection getServerInfo() { - return Collections.unmodifiableCollection(liveServerInfo); + public Collection getServerInfo() { + return getServers(); + } + + public Collection getServers() { + return Collections.unmodifiableCollection(this.servers); } // // Setters // - public void setServerInfo(Collection serverInfo) { - this.liveServerInfo = serverInfo; + public void setServers(Collection names) { + this.servers = names; } public void setDeadServers(Collection deadServers) { @@ -201,9 +199,9 @@ public class ClusterStatus extends VersionedWritable { public void write(DataOutput out) throws IOException { super.write(out); out.writeUTF(hbaseVersion); - out.writeInt(liveServerInfo.size()); - for (HServerInfo server: liveServerInfo) { - server.write(out); + out.writeInt(getServersSize()); + for (ServerName sn: this.servers) { + out.writeUTF(sn.toString()); } out.writeInt(deadServers.size()); for (String server: deadServers) { @@ -220,11 +218,10 @@ public class ClusterStatus extends VersionedWritable { super.readFields(in); hbaseVersion = in.readUTF(); int count = in.readInt(); - liveServerInfo = new ArrayList(count); + this.servers = new ArrayList(count); for (int i = 0; i < count; i++) { - HServerInfo info = new HServerInfo(); - info.readFields(in); - liveServerInfo.add(info); + String str = in.readUTF(); + this.servers.add(new ServerName(str)); } count = in.readInt(); deadServers = new ArrayList(count); @@ -240,4 +237,4 @@ public class ClusterStatus extends VersionedWritable { this.intransition.put(key, regionState); } } -} +} \ No newline at end of file diff --git a/src/main/java/org/apache/hadoop/hbase/HConstants.java b/src/main/java/org/apache/hadoop/hbase/HConstants.java index a44a0b9..6aef8f9 100644 --- a/src/main/java/org/apache/hadoop/hbase/HConstants.java +++ b/src/main/java/org/apache/hadoop/hbase/HConstants.java @@ -360,10 +360,15 @@ public final class HConstants { 
/** HBCK special code name used as server name when manipulating ZK nodes */ public static final String HBCK_CODE_NAME = "HBCKServerName"; + public static final ServerName HBCK_CODE_SERVERNAME = + new ServerName(HBCK_CODE_NAME, -1, -1L); public static final String HBASE_MASTER_LOGCLEANER_PLUGINS = "hbase.master.logcleaner.plugins"; + public static final String KEY_FOR_HOSTNAME_SEEN_BY_MASTER = + "hbase.regionserver.hostname.seen.by.master"; + private HConstants() { // Can't be instantiated with this ctor. } diff --git a/src/main/java/org/apache/hadoop/hbase/HMsg.java b/src/main/java/org/apache/hadoop/hbase/HMsg.java deleted file mode 100644 index c53460f..0000000 --- a/src/main/java/org/apache/hadoop/hbase/HMsg.java +++ /dev/null @@ -1,256 +0,0 @@ -/** - * Copyright 2010 The Apache Software Foundation - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase; - -import java.io.DataInput; -import java.io.DataOutput; -import java.io.IOException; - -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.io.Writable; - -/** - * HMsg is used to send messages between master and regionservers. Messages are - * sent as payload on the regionserver-to-master heartbeats. 
Region assignment - * does not use this mechanism. It goes via zookeeper. - * - *

Most of the time the messages are simple but some messages are accompanied - * by the region affected. HMsg may also carry an optional message. - * - *

TODO: Clean out all messages that go from master to regionserver; by - * design, these are to go via zk from here on out. - */ -public class HMsg implements Writable { - public static final HMsg [] STOP_REGIONSERVER_ARRAY = - new HMsg [] {new HMsg(Type.STOP_REGIONSERVER)}; - public static final HMsg [] EMPTY_HMSG_ARRAY = new HMsg[0]; - - public static enum Type { - /** Master tells region server to stop. - */ - STOP_REGIONSERVER, - - /** - * Region server split the region associated with this message. - */ - REGION_SPLIT, - - /** - * When RegionServer receives this message, it goes into a sleep that only - * an exit will cure. This message is sent by unit tests simulating - * pathological states. - */ - TESTING_BLOCK_REGIONSERVER, - } - - private Type type = null; - private HRegionInfo info = null; - private byte[] message = null; - private HRegionInfo daughterA = null; - private HRegionInfo daughterB = null; - - /** Default constructor. Used during deserialization */ - public HMsg() { - this(null); - } - - /** - * Construct a message with the specified message and empty HRegionInfo - * @param type Message type - */ - public HMsg(final HMsg.Type type) { - this(type, new HRegionInfo(), null); - } - - /** - * Construct a message with the specified message and HRegionInfo - * @param type Message type - * @param hri Region to which message type applies - */ - public HMsg(final HMsg.Type type, final HRegionInfo hri) { - this(type, hri, null); - } - - /** - * Construct a message with the specified message and HRegionInfo - * - * @param type Message type - * @param hri Region to which message type applies. Cannot be - * null. If no info associated, used other Constructor. - * @param msg Optional message (Stringified exception, etc.) 
- */ - public HMsg(final HMsg.Type type, final HRegionInfo hri, final byte[] msg) { - this(type, hri, null, null, msg); - } - - /** - * Construct a message with the specified message and HRegionInfo - * - * @param type Message type - * @param hri Region to which message type applies. Cannot be - * null. If no info associated, used other Constructor. - * @param daughterA - * @param daughterB - * @param msg Optional message (Stringified exception, etc.) - */ - public HMsg(final HMsg.Type type, final HRegionInfo hri, - final HRegionInfo daughterA, final HRegionInfo daughterB, final byte[] msg) { - this.type = type; - if (hri == null) { - throw new NullPointerException("Region cannot be null"); - } - this.info = hri; - this.message = msg; - this.daughterA = daughterA; - this.daughterB = daughterB; - } - - /** - * @return Region info or null if none associated with this message type. - */ - public HRegionInfo getRegionInfo() { - return this.info; - } - - /** @return the type of message */ - public Type getType() { - return this.type; - } - - /** - * @param other Message type to compare to - * @return True if we are of same message type as other - */ - public boolean isType(final HMsg.Type other) { - return this.type.equals(other); - } - - /** @return the message type */ - public byte[] getMessage() { - return this.message; - } - - /** - * @return First daughter if Type is MSG_REPORT_SPLIT_INCLUDES_DAUGHTERS else - * null - */ - public HRegionInfo getDaughterA() { - return this.daughterA; - } - - /** - * @return Second daughter if Type is MSG_REPORT_SPLIT_INCLUDES_DAUGHTERS else - * null - */ - public HRegionInfo getDaughterB() { - return this.daughterB; - } - - /** - * @see java.lang.Object#toString() - */ - @Override - public String toString() { - StringBuilder sb = new StringBuilder(); - sb.append(this.type.toString()); - // If null or empty region, don't bother printing it out. 
- if (this.info != null && this.info.getRegionName().length > 0) { - sb.append(": "); - sb.append(this.info.getRegionNameAsString()); - } - if (this.message != null && this.message.length > 0) { - sb.append(": " + Bytes.toString(this.message)); - } - return sb.toString(); - } - - /** - * @see java.lang.Object#equals(java.lang.Object) - */ - @Override - public boolean equals(Object obj) { - if (this == obj) { - return true; - } - if (obj == null) { - return false; - } - if (getClass() != obj.getClass()) { - return false; - } - HMsg that = (HMsg)obj; - return this.type.equals(that.type) && - (this.info != null)? this.info.equals(that.info): - that.info == null; - } - - /** - * @see java.lang.Object#hashCode() - */ - @Override - public int hashCode() { - int result = this.type.hashCode(); - if (this.info != null) { - result ^= this.info.hashCode(); - } - return result; - } - - // //////////////////////////////////////////////////////////////////////////// - // Writable - ////////////////////////////////////////////////////////////////////////////// - - /** - * @see org.apache.hadoop.io.Writable#write(java.io.DataOutput) - */ - public void write(DataOutput out) throws IOException { - out.writeInt(this.type.ordinal()); - this.info.write(out); - if (this.message == null || this.message.length == 0) { - out.writeBoolean(false); - } else { - out.writeBoolean(true); - Bytes.writeByteArray(out, this.message); - } - if (this.type.equals(Type.REGION_SPLIT)) { - this.daughterA.write(out); - this.daughterB.write(out); - } - } - - /** - * @see org.apache.hadoop.io.Writable#readFields(java.io.DataInput) - */ - public void readFields(DataInput in) throws IOException { - int ordinal = in.readInt(); - this.type = HMsg.Type.values()[ordinal]; - this.info.readFields(in); - boolean hasMessage = in.readBoolean(); - if (hasMessage) { - this.message = Bytes.readByteArray(in); - } - if (this.type.equals(Type.REGION_SPLIT)) { - this.daughterA = new HRegionInfo(); - this.daughterB = new 
HRegionInfo(); - this.daughterA.readFields(in); - this.daughterB.readFields(in); - } - } -} \ No newline at end of file diff --git a/src/main/java/org/apache/hadoop/hbase/HRegionLocation.java b/src/main/java/org/apache/hadoop/hbase/HRegionLocation.java index bd353b8..618cd2e 100644 --- a/src/main/java/org/apache/hadoop/hbase/HRegionLocation.java +++ b/src/main/java/org/apache/hadoop/hbase/HRegionLocation.java @@ -20,19 +20,20 @@ package org.apache.hadoop.hbase; /** - * Contains the HRegionInfo for the region and the HServerAddress for the - * HRegionServer serving the region + * Data structure to hold HRegionInfo and the HServerAddress for the hosting + * HRegionServer. Immutable. */ public class HRegionLocation implements Comparable { - // TODO: Is this class necessary? Why not just have a Pair? - private HRegionInfo regionInfo; - private HServerAddress serverAddress; + // TODO: Do we need a class to pass HServerAddress? Won't InetSocketAddress + // do? Need to clean up client references to HServerAddress first. + // St.Ack 02/15/2011. 
+ private final HRegionInfo regionInfo; + private final HServerAddress serverAddress; /** * Constructor - * * @param regionInfo the HRegionInfo for the region - * @param serverAddress the HServerAddress for the region server + * @param serverAddress the HServerAddress for the hosting region server */ public HRegionLocation(HRegionInfo regionInfo, HServerAddress serverAddress) { this.regionInfo = regionInfo; @@ -44,8 +45,8 @@ public class HRegionLocation implements Comparable { */ @Override public String toString() { - return "address: " + this.serverAddress.toString() + ", regioninfo: " + - this.regionInfo.getRegionNameAsString(); + return "region=" + this.regionInfo.getRegionNameAsString() + + ", address=" + this.serverAddress.toString(); } /** @@ -91,7 +92,7 @@ public class HRegionLocation implements Comparable { public int compareTo(HRegionLocation o) { int result = this.regionInfo.compareTo(o.regionInfo); - if(result == 0) { + if (result == 0) { result = this.serverAddress.compareTo(o.serverAddress); } return result; diff --git a/src/main/java/org/apache/hadoop/hbase/HServerAddress.java b/src/main/java/org/apache/hadoop/hbase/HServerAddress.java index 7f8a472..2f427d9 100644 --- a/src/main/java/org/apache/hadoop/hbase/HServerAddress.java +++ b/src/main/java/org/apache/hadoop/hbase/HServerAddress.java @@ -19,25 +19,34 @@ */ package org.apache.hadoop.hbase; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.io.WritableComparable; - import java.io.DataInput; import java.io.DataOutput; import java.io.IOException; -import java.net.InetSocketAddress; import java.net.InetAddress; +import java.net.InetSocketAddress; + +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.io.WritableComparable; /** - * HServerAddress is a "label" for a HBase server made of host and port number. + * HServerAddress hosts a {@link InetSocketAddress} making it + * {@link WritableComparable}. 
Resolves on construction AND on + * deserialization so could end up with different results if the + * two ends of serialization have different resolvers. + * Be careful where you use it. Should only be used when you need to pass + * an InetSocketAddress. Even then its a bad idea because of the above. */ public class HServerAddress implements WritableComparable { - private InetSocketAddress address; - String stringValue; + // TODO: This class is on its way out. Use InetSocketAddress or ServerName + // instead -- St.Ack 02/16/2011. + private InetSocketAddress address = null; + private String cachedToString = ""; + /** + * Constructor for deserializing use only. + */ public HServerAddress() { - this.address = null; - this.stringValue = null; + super(); } /** @@ -46,34 +55,20 @@ public class HServerAddress implements WritableComparable { */ public HServerAddress(InetSocketAddress address) { this.address = address; - this.stringValue = address.getAddress().getHostName() + ":" + - address.getPort(); checkBindAddressCanBeResolved(); + this.cachedToString = createCachedToString(); } - /** - * @param hostAndPort Hostname and port formatted as <hostname> ':' <port> - */ - public HServerAddress(String hostAndPort) { - int colonIndex = hostAndPort.lastIndexOf(':'); - if (colonIndex < 0) { - throw new IllegalArgumentException("Not a host:port pair: " + hostAndPort); - } - String host = hostAndPort.substring(0, colonIndex); - int port = Integer.parseInt(hostAndPort.substring(colonIndex + 1)); - this.address = new InetSocketAddress(host, port); - this.stringValue = address.getHostName() + ":" + port; - checkBindAddressCanBeResolved(); + private String createCachedToString() { + return this.address.toString(); } /** - * @param bindAddress Hostname + * @param hostname Hostname * @param port Port number */ - public HServerAddress(String bindAddress, int port) { - this.address = new InetSocketAddress(bindAddress, port); - this.stringValue = address.getHostName() + ":" + port; - 
checkBindAddressCanBeResolved(); + public HServerAddress(final String hostname, final int port) { + this(new InetSocketAddress(hostname, port)); } /** @@ -81,45 +76,48 @@ public class HServerAddress implements WritableComparable { * @param other HServerAddress to copy from */ public HServerAddress(HServerAddress other) { - String bindAddress = other.getBindAddress(); - int port = other.getPort(); - this.address = new InetSocketAddress(bindAddress, port); - stringValue = other.stringValue; - checkBindAddressCanBeResolved(); + this(new InetSocketAddress(other.getHostname(), other.getPort())); } - /** @return Bind address */ + /** @return Bind address -- the raw IP, the result of a call to + * {@link InetSocketAddress#getAddress()#getHostAddress()} -- + * or null if cannot resolve */ public String getBindAddress() { - final InetAddress addr = address.getAddress(); - if (addr != null) { - return addr.getHostAddress(); - } else { - LogFactory.getLog(HServerAddress.class).error("Could not resolve the" - + " DNS name of " + stringValue); - return null; - } + // This returns null if the address is not resolved. 
+ final InetAddress addr = this.address.getAddress(); + if (addr != null) return addr.getHostAddress(); + LogFactory.getLog(HServerAddress.class).error("Could not resolve the" + + " DNS name of " + this.address.toString()); + return null; } private void checkBindAddressCanBeResolved() { if (getBindAddress() == null) { throw new IllegalArgumentException("Could not resolve the" - + " DNS name of " + stringValue); + + " DNS name of " + this.address.toString()); } } /** @return Port number */ public int getPort() { - return address.getPort(); + return this.address.getPort(); } /** @return Hostname */ public String getHostname() { - return address.getHostName(); + return this.address.getHostName(); + } + + /** + * @return Returns ':' + */ + public String getHostnameAndPort() { + return getHostname() + ":" + getPort(); } /** @return The InetSocketAddress */ public InetSocketAddress getInetSocketAddress() { - return address; + return this.address; } /** @@ -127,27 +125,21 @@ public class HServerAddress implements WritableComparable { */ @Override public String toString() { - return stringValue == null ? 
"" : stringValue; + return this.cachedToString; } @Override public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null) { - return false; - } - if (getClass() != o.getClass()) { - return false; - } - return compareTo((HServerAddress) o) == 0; + if (this == o) return true; + if (o == null) return false; + if (getClass() != o.getClass()) return false; + return compareTo((HServerAddress)o) == 0; } @Override public int hashCode() { - int result = address.hashCode(); - result ^= stringValue.hashCode(); + int result = this.address.hashCode(); + result ^= this.cachedToString.hashCode(); return result; } @@ -158,24 +150,20 @@ public class HServerAddress implements WritableComparable { public void readFields(DataInput in) throws IOException { String hostname = in.readUTF(); int port = in.readInt(); - - if (hostname == null || hostname.length() == 0) { - address = null; - stringValue = null; - } else { - address = new InetSocketAddress(hostname, port); - stringValue = hostname + ":" + port; + if (hostname != null && hostname.length() > 0) { + this.address = new InetSocketAddress(hostname, port); checkBindAddressCanBeResolved(); + createCachedToString(); } } public void write(DataOutput out) throws IOException { - if (address == null) { + if (this.address == null) { out.writeUTF(""); out.writeInt(0); } else { - out.writeUTF(address.getAddress().getHostName()); - out.writeInt(address.getPort()); + out.writeUTF(this.address.getAddress().getHostName()); + out.writeInt(this.address.getPort()); } } @@ -187,7 +175,7 @@ public class HServerAddress implements WritableComparable { // Addresses as Strings may not compare though address is for the one // server with only difference being that one address has hostname // resolved whereas other only has IP. 
- if (address.equals(o.address)) return 0; + if (this.address.equals(o.address)) return 0; return toString().compareTo(o.toString()); } -} +} \ No newline at end of file diff --git a/src/main/java/org/apache/hadoop/hbase/HServerInfo.java b/src/main/java/org/apache/hadoop/hbase/HServerInfo.java index c742951..ab3e807 100644 --- a/src/main/java/org/apache/hadoop/hbase/HServerInfo.java +++ b/src/main/java/org/apache/hadoop/hbase/HServerInfo.java @@ -23,68 +23,42 @@ import java.io.DataInput; import java.io.DataOutput; import java.io.IOException; import java.net.InetSocketAddress; -import java.util.Comparator; -import java.util.Set; import org.apache.hadoop.hbase.regionserver.HRegionServer; -import org.apache.hadoop.io.Writable; import org.apache.hadoop.io.WritableComparable; /** - * HServerInfo is meta info about an {@link HRegionServer}. It is the token - * by which a master distingushes a particular regionserver from the rest. - * It holds hostname, ports, regionserver startcode, and load. Each server has - * a servername where servername is made up of a concatenation of - * hostname, port, and regionserver startcode. This servername is used in - * various places identifying this regionserver. Its even used as part of - * a pathname in the filesystem. As part of the initialization, - * master will pass the regionserver the address that it knows this regionserver - * by. In subsequent communications, the regionserver will pass a HServerInfo - * with the master-supplied address. + * HServerInfo is meta info about an {@link HRegionServer}. It hosts the + * {@link HServerAddress}, its webui port, and its server startcode. + * @deprecated User {@link InetSocketAddress} and or {@link ServerName} */ public class HServerInfo implements WritableComparable { - /* - * This character is used as separator between server hostname and port and - * its startcode. 
Servername is formatted as - * <hostname> '{@ink #SERVERNAME_SEPARATOR"}' <port> '{@ink #SERVERNAME_SEPARATOR"}' <startcode>. - */ - private static final String SERVERNAME_SEPARATOR = ","; - + // You used to get the 'serverName' from this class but now you must go to + // ServerName instances. private HServerAddress serverAddress; private long startCode; - private HServerLoad load; - private int infoPort; - // Servername is made of hostname, port and startcode. - private String serverName = null; - // Hostname of the regionserver. - private String hostname; - private String cachedHostnamePort = null; + private int webuiport; + private String cachedToString = null; public HServerInfo() { - this(new HServerAddress(), 0, HConstants.DEFAULT_REGIONSERVER_INFOPORT, - "default name"); + this(new HServerAddress(), 0, HConstants.DEFAULT_REGIONSERVER_INFOPORT); } /** - * Constructor that creates a HServerInfo with a generated startcode and an - * empty load. - * @param serverAddress An {@link InetSocketAddress} encased in a {@link Writable} - * @param infoPort Port the webui runs on. - * @param hostname Server hostname. + * Constructor that creates a HServerInfo with a generated startcode + * @param serverAddress + * @param webuiport Port the webui runs on. 
*/ - public HServerInfo(HServerAddress serverAddress, final int infoPort, - final String hostname) { - this(serverAddress, System.currentTimeMillis(), infoPort, hostname); + public HServerInfo(final HServerAddress serverAddress, final int webuiport) { + this(serverAddress, System.currentTimeMillis(), webuiport); } public HServerInfo(HServerAddress serverAddress, long startCode, - final int infoPort, String hostname) { + final int webuiport) { this.serverAddress = serverAddress; this.startCode = startCode; - this.load = new HServerLoad(); - this.infoPort = infoPort; - this.hostname = hostname; + this.webuiport = webuiport; } /** @@ -94,106 +68,27 @@ public class HServerInfo implements WritableComparable { public HServerInfo(HServerInfo other) { this.serverAddress = new HServerAddress(other.getServerAddress()); this.startCode = other.getStartCode(); - this.load = other.getLoad(); - this.infoPort = other.getInfoPort(); - this.hostname = other.hostname; - } - - public HServerLoad getLoad() { - return load; - } - - public void setLoad(HServerLoad load) { - this.load = load; + this.webuiport = other.getInfoPort(); } public synchronized HServerAddress getServerAddress() { return new HServerAddress(serverAddress); } - public synchronized void setServerAddress(HServerAddress serverAddress) { - this.serverAddress = serverAddress; - this.hostname = serverAddress.getHostname(); - this.serverName = null; - } - public synchronized long getStartCode() { return startCode; } public int getInfoPort() { - return this.infoPort; - } - - public String getHostname() { - return this.hostname; - } - - /** - * @return The hostname and port concatenated with a ':' as separator. 
- */ - public synchronized String getHostnamePort() { - if (this.cachedHostnamePort == null) { - this.cachedHostnamePort = getHostnamePort(this.hostname, this.serverAddress.getPort()); - } - return this.cachedHostnamePort; - } - - /** - * @param hostname - * @param port - * @return The hostname and port concatenated with a ':' as separator. - */ - public static String getHostnamePort(final String hostname, final int port) { - return hostname + ":" + port; - } - - /** - * Gets the unique server instance name. Includes the hostname, port, and - * start code. - * @return Server name made of the concatenation of hostname, port and - * startcode formatted as <hostname> ',' <port> ',' <startcode> - */ - public synchronized String getServerName() { - if (this.serverName == null) { - this.serverName = getServerName(this.hostname, - this.serverAddress.getPort(), this.startCode); - } - return this.serverName; + return getWebuiPort(); } - public static synchronized String getServerName(final String hostAndPort, - final long startcode) { - int index = hostAndPort.indexOf(":"); - if (index <= 0) throw new IllegalArgumentException("Expected ':' "); - return getServerName(hostAndPort.substring(0, index), - Integer.parseInt(hostAndPort.substring(index + 1)), startcode); - } - - /** - * @param address Server address - * @param startCode Server startcode - * @return Server name made of the concatenation of hostname, port and - * startcode formatted as <hostname> ',' <port> ',' <startcode> - */ - public static String getServerName(HServerAddress address, long startCode) { - return getServerName(address.getHostname(), address.getPort(), startCode); + public int getWebuiPort() { + return this.webuiport; } - /* - * @param hostName - * @param port - * @param startCode - * @return Server name made of the concatenation of hostname, port and - * startcode formatted as <hostname> ',' <port> ',' <startcode> - */ - public static String getServerName(String hostName, int port, long startCode) { 
- StringBuilder name = new StringBuilder(hostName); - name.append(SERVERNAME_SEPARATOR); - name.append(port); - name.append(SERVERNAME_SEPARATOR); - name.append(startCode); - return name.toString(); + public String getHostname() { + return this.serverAddress.getHostname(); } /** @@ -202,81 +97,49 @@ public class HServerInfo implements WritableComparable { * @see #getLoad() */ @Override - public String toString() { - return "serverName=" + getServerName() + - ", load=(" + this.load.toString() + ")"; + public synchronized String toString() { + if (this.cachedToString == null) { + this.cachedToString = "servername=" + + ServerName.getServerName(this.serverAddress.getHostname(), + this.serverAddress.getPort(), this.startCode).toString() + + ", webuiport=" + this.webuiport; + } + return this.cachedToString; } @Override public boolean equals(Object obj) { - if (this == obj) { - return true; - } - if (obj == null) { - return false; - } - if (getClass() != obj.getClass()) { - return false; - } + if (this == obj) return true; + if (obj == null) return false; + if (getClass() != obj.getClass()) return false; return compareTo((HServerInfo)obj) == 0; } @Override public int hashCode() { - return this.getServerName().hashCode(); + int code = this.serverAddress.hashCode(); + code ^= this.webuiport; + code ^= this.startCode; + return code; } public void readFields(DataInput in) throws IOException { this.serverAddress.readFields(in); this.startCode = in.readLong(); - this.load.readFields(in); - this.infoPort = in.readInt(); - this.hostname = in.readUTF(); + this.webuiport = in.readInt(); } public void write(DataOutput out) throws IOException { this.serverAddress.write(out); out.writeLong(this.startCode); - this.load.write(out); - out.writeInt(this.infoPort); - out.writeUTF(hostname); + out.writeInt(this.webuiport); } public int compareTo(HServerInfo o) { - return this.getServerName().compareTo(o.getServerName()); - } - - /** - * Orders HServerInfos by load then name. 
Natural/ascending order. - */ - public static class LoadComparator implements Comparator { - @Override - public int compare(HServerInfo left, HServerInfo right) { - int loadCompare = left.getLoad().compareTo(right.getLoad()); - return loadCompare != 0 ? loadCompare : left.compareTo(right); - } - } - - /** - * Utility method that does a find of a servername or a hostandport combination - * in the passed Set. - * @param servers Set of server names - * @param serverName Name to look for - * @param hostAndPortOnly If serverName is a - * hostname ':' port - * or hostname , port , startcode. - * @return True if serverName found in servers - */ - public static boolean isServer(final Set servers, - final String serverName, final boolean hostAndPortOnly) { - if (!hostAndPortOnly) return servers.contains(serverName); - String serverNameColonReplaced = - serverName.replaceFirst(":", SERVERNAME_SEPARATOR); - for (String hostPortStartCode: servers) { - int index = hostPortStartCode.lastIndexOf(SERVERNAME_SEPARATOR); - String hostPortStrippedOfStartCode = hostPortStartCode.substring(0, index); - if (hostPortStrippedOfStartCode.equals(serverNameColonReplaced)) return true; - } - return false; + int compare = this.serverAddress.compareTo(o.getServerAddress()); + if (compare != 0) return compare; + if (this.webuiport != o.getInfoPort()) return this.webuiport - o.getInfoPort(); + if (this.startCode != o.getStartCode()) return (int)(this.startCode - o.getStartCode()); + return 0; } -} +} \ No newline at end of file diff --git a/src/main/java/org/apache/hadoop/hbase/HServerLoad.java b/src/main/java/org/apache/hadoop/hbase/HServerLoad.java deleted file mode 100644 index efa7e0e..0000000 --- a/src/main/java/org/apache/hadoop/hbase/HServerLoad.java +++ /dev/null @@ -1,493 +0,0 @@ -/** - * Copyright 2007 The Apache Software Foundation - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase; - -import java.io.DataInput; -import java.io.DataOutput; -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collection; -import java.util.Collections; - -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.Strings; -import org.apache.hadoop.io.Writable; -import org.apache.hadoop.io.WritableComparable; - -/** - * This class encapsulates metrics for determining the load on a HRegionServer - */ -public class HServerLoad implements WritableComparable { - /** number of regions */ - // could just use regionLoad.size() but master.RegionManager likes to play - // around with this value while passing HServerLoad objects around during - // balancer calculations - private int numberOfRegions; - /** number of requests since last report */ - private int numberOfRequests; - /** the amount of used heap, in MB */ - private int usedHeapMB; - /** the maximum allowable size of the heap, in MB */ - private int maxHeapMB; - /** per-region load metrics */ - private ArrayList regionLoad = new ArrayList(); - - /** - * Encapsulates per-region loading metrics. 
- */ - public static class RegionLoad implements Writable { - /** the region name */ - private byte[] name; - /** the number of stores for the region */ - private int stores; - /** the number of storefiles for the region */ - private int storefiles; - /** the current total size of the store files for the region, in MB */ - private int storefileSizeMB; - /** the current size of the memstore for the region, in MB */ - private int memstoreSizeMB; - /** the current total size of storefile indexes for the region, in MB */ - private int storefileIndexSizeMB; - - /** - * Constructor, for Writable - */ - public RegionLoad() { - super(); - } - - /** - * @param name - * @param stores - * @param storefiles - * @param storefileSizeMB - * @param memstoreSizeMB - * @param storefileIndexSizeMB - */ - public RegionLoad(final byte[] name, final int stores, - final int storefiles, final int storefileSizeMB, - final int memstoreSizeMB, final int storefileIndexSizeMB) { - this.name = name; - this.stores = stores; - this.storefiles = storefiles; - this.storefileSizeMB = storefileSizeMB; - this.memstoreSizeMB = memstoreSizeMB; - this.storefileIndexSizeMB = storefileIndexSizeMB; - } - - // Getters - - /** - * @return the region name - */ - public byte[] getName() { - return name; - } - - /** - * @return the region name as a string - */ - public String getNameAsString() { - return Bytes.toString(name); - } - - /** - * @return the number of stores - */ - public int getStores() { - return stores; - } - - /** - * @return the number of storefiles - */ - public int getStorefiles() { - return storefiles; - } - - /** - * @return the total size of the storefiles, in MB - */ - public int getStorefileSizeMB() { - return storefileSizeMB; - } - - /** - * @return the memstore size, in MB - */ - public int getMemStoreSizeMB() { - return memstoreSizeMB; - } - - /** - * @return the approximate size of storefile indexes on the heap, in MB - */ - public int getStorefileIndexSizeMB() { - return 
storefileIndexSizeMB; - } - - // Setters - - /** - * @param name the region name - */ - public void setName(byte[] name) { - this.name = name; - } - - /** - * @param stores the number of stores - */ - public void setStores(int stores) { - this.stores = stores; - } - - /** - * @param storefiles the number of storefiles - */ - public void setStorefiles(int storefiles) { - this.storefiles = storefiles; - } - - /** - * @param memstoreSizeMB the memstore size, in MB - */ - public void setMemStoreSizeMB(int memstoreSizeMB) { - this.memstoreSizeMB = memstoreSizeMB; - } - - /** - * @param storefileIndexSizeMB the approximate size of storefile indexes - * on the heap, in MB - */ - public void setStorefileIndexSizeMB(int storefileIndexSizeMB) { - this.storefileIndexSizeMB = storefileIndexSizeMB; - } - - // Writable - public void readFields(DataInput in) throws IOException { - int namelen = in.readInt(); - this.name = new byte[namelen]; - in.readFully(this.name); - this.stores = in.readInt(); - this.storefiles = in.readInt(); - this.storefileSizeMB = in.readInt(); - this.memstoreSizeMB = in.readInt(); - this.storefileIndexSizeMB = in.readInt(); - } - - public void write(DataOutput out) throws IOException { - out.writeInt(name.length); - out.write(name); - out.writeInt(stores); - out.writeInt(storefiles); - out.writeInt(storefileSizeMB); - out.writeInt(memstoreSizeMB); - out.writeInt(storefileIndexSizeMB); - } - - /** - * @see java.lang.Object#toString() - */ - @Override - public String toString() { - StringBuilder sb = Strings.appendKeyValue(new StringBuilder(), "stores", - Integer.valueOf(this.stores)); - sb = Strings.appendKeyValue(sb, "storefiles", - Integer.valueOf(this.storefiles)); - sb = Strings.appendKeyValue(sb, "storefileSizeMB", - Integer.valueOf(this.storefileSizeMB)); - sb = Strings.appendKeyValue(sb, "memstoreSizeMB", - Integer.valueOf(this.memstoreSizeMB)); - sb = Strings.appendKeyValue(sb, "storefileIndexSizeMB", - Integer.valueOf(this.storefileIndexSizeMB)); 
- return sb.toString(); - } - } - - /* - * TODO: Other metrics that might be considered when the master is actually - * doing load balancing instead of merely trying to decide where to assign - * a region: - *

    - *
  • # of CPUs, heap size (to determine the "class" of machine). For - * now, we consider them to be homogeneous.
  • - *
  • #requests per region (Map<{String|HRegionInfo}, Integer>)
  • - *
  • #compactions and/or #splits (churn)
  • - *
  • server death rate (maybe there is something wrong with this server)
  • - *
- */ - - /** default constructor (used by Writable) */ - public HServerLoad() { - super(); - } - - /** - * Constructor - * @param numberOfRequests - * @param usedHeapMB - * @param maxHeapMB - */ - public HServerLoad(final int numberOfRequests, final int usedHeapMB, - final int maxHeapMB) { - this.numberOfRequests = numberOfRequests; - this.usedHeapMB = usedHeapMB; - this.maxHeapMB = maxHeapMB; - } - - /** - * Constructor - * @param hsl the template HServerLoad - */ - public HServerLoad(final HServerLoad hsl) { - this(hsl.numberOfRequests, hsl.usedHeapMB, hsl.maxHeapMB); - this.regionLoad.addAll(hsl.regionLoad); - } - - /** - * Originally, this method factored in the effect of requests going to the - * server as well. However, this does not interact very well with the current - * region rebalancing code, which only factors number of regions. For the - * interim, until we can figure out how to make rebalancing use all the info - * available, we're just going to make load purely the number of regions. - * - * @return load factor for this server - */ - public int getLoad() { - // int load = numberOfRequests == 0 ? 1 : numberOfRequests; - // load *= numberOfRegions == 0 ? 
1 : numberOfRegions; - // return load; - return numberOfRegions; - } - - /** - * @see java.lang.Object#toString() - */ - @Override - public String toString() { - return toString(1); - } - - /** - * Returns toString() with the number of requests divided by the message - * interval in seconds - * @param msgInterval - * @return The load as a String - */ - public String toString(int msgInterval) { - StringBuilder sb = new StringBuilder(); - sb = Strings.appendKeyValue(sb, "requests", - Integer.valueOf(numberOfRequests/msgInterval)); - sb = Strings.appendKeyValue(sb, "regions", - Integer.valueOf(numberOfRegions)); - sb = Strings.appendKeyValue(sb, "usedHeap", - Integer.valueOf(this.usedHeapMB)); - sb = Strings.appendKeyValue(sb, "maxHeap", Integer.valueOf(maxHeapMB)); - return sb.toString(); - } - - /** - * @see java.lang.Object#equals(java.lang.Object) - */ - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null) { - return false; - } - if (getClass() != o.getClass()) { - return false; - } - return compareTo((HServerLoad)o) == 0; - } - - /** - * @see java.lang.Object#hashCode() - */ - @Override - public int hashCode() { - int result = Integer.valueOf(numberOfRequests).hashCode(); - result ^= Integer.valueOf(numberOfRegions).hashCode(); - return result; - } - - // Getters - - /** - * @return the numberOfRegions - */ - public int getNumberOfRegions() { - return numberOfRegions; - } - - /** - * @return the numberOfRequests - */ - public int getNumberOfRequests() { - return numberOfRequests; - } - - /** - * @return the amount of heap in use, in MB - */ - public int getUsedHeapMB() { - return usedHeapMB; - } - - /** - * @return the maximum allowable heap size, in MB - */ - public int getMaxHeapMB() { - return maxHeapMB; - } - - /** - * @return region load metrics - */ - public Collection getRegionsLoad() { - return Collections.unmodifiableCollection(regionLoad); - } - - /** - * @return Count of storefiles on this regionserver - 
*/ - public int getStorefiles() { - int count = 0; - for (RegionLoad info: regionLoad) - count += info.getStorefiles(); - return count; - } - - /** - * @return Total size of store files in MB - */ - public int getStorefileSizeInMB() { - int count = 0; - for (RegionLoad info: regionLoad) - count += info.getStorefileSizeMB(); - return count; - } - - /** - * @return Size of memstores in MB - */ - public int getMemStoreSizeInMB() { - int count = 0; - for (RegionLoad info: regionLoad) - count += info.getMemStoreSizeMB(); - return count; - } - - /** - * @return Size of store file indexes in MB - */ - public int getStorefileIndexSizeInMB() { - int count = 0; - for (RegionLoad info: regionLoad) - count += info.getStorefileIndexSizeMB(); - return count; - } - - // Setters - - /** - * @param numberOfRegions the number of regions - */ - public void setNumberOfRegions(int numberOfRegions) { - this.numberOfRegions = numberOfRegions; - } - - /** - * @param numberOfRequests the number of requests to set - */ - public void setNumberOfRequests(int numberOfRequests) { - this.numberOfRequests = numberOfRequests; - } - - /** - * @param usedHeapMB the amount of heap in use, in MB - */ - public void setUsedHeapMB(int usedHeapMB) { - this.usedHeapMB = usedHeapMB; - } - - /** - * @param maxHeapMB the maximum allowable heap size, in MB - */ - public void setMaxHeapMB(int maxHeapMB) { - this.maxHeapMB = maxHeapMB; - } - - /** - * @param load Instance of HServerLoad - */ - public void addRegionInfo(final HServerLoad.RegionLoad load) { - this.numberOfRegions++; - this.regionLoad.add(load); - } - - /** - * @param name - * @param stores - * @param storefiles - * @param memstoreSizeMB - * @param storefileIndexSizeMB - * @deprecated Use {@link #addRegionInfo(RegionLoad)} - */ - @Deprecated - public void addRegionInfo(final byte[] name, final int stores, - final int storefiles, final int storefileSizeMB, - final int memstoreSizeMB, final int storefileIndexSizeMB) { - this.regionLoad.add(new 
HServerLoad.RegionLoad(name, stores, storefiles, - storefileSizeMB, memstoreSizeMB, storefileIndexSizeMB)); - } - - // Writable - - public void readFields(DataInput in) throws IOException { - numberOfRequests = in.readInt(); - usedHeapMB = in.readInt(); - maxHeapMB = in.readInt(); - numberOfRegions = in.readInt(); - for (int i = 0; i < numberOfRegions; i++) { - RegionLoad rl = new RegionLoad(); - rl.readFields(in); - regionLoad.add(rl); - } - } - - public void write(DataOutput out) throws IOException { - out.writeInt(numberOfRequests); - out.writeInt(usedHeapMB); - out.writeInt(maxHeapMB); - out.writeInt(numberOfRegions); - for (int i = 0; i < numberOfRegions; i++) - regionLoad.get(i).write(out); - } - - // Comparable - - public int compareTo(HServerLoad o) { - return this.getLoad() - o.getLoad(); - } -} diff --git a/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java b/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java index 0d696ab..5bc3bb0 100644 --- a/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java +++ b/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java @@ -254,12 +254,10 @@ public class LocalHBaseCluster { while (regionServerThread.isAlive()) { try { LOG.info("Waiting on " + - regionServerThread.getRegionServer().getHServerInfo().toString()); + regionServerThread.getRegionServer().toString()); regionServerThread.join(); } catch (InterruptedException e) { e.printStackTrace(); - } catch (IOException e) { - e.printStackTrace(); } } return regionServerThread.getName(); @@ -275,12 +273,10 @@ public class LocalHBaseCluster { while (rst.isAlive()) { try { LOG.info("Waiting on " + - rst.getRegionServer().getHServerInfo().toString()); + rst.getRegionServer().toString()); rst.join(); } catch (InterruptedException e) { e.printStackTrace(); - } catch (IOException e) { - e.printStackTrace(); } } for (int i=0;i<hostname> ',' <port> ',' <startcode>. - * If the master, it returns <hostname> ':' <port>'. 
- * @return unique server name + * @return The unique server name for this server. */ - public String getServerName(); + public ServerName getServerName(); } \ No newline at end of file diff --git a/src/main/java/org/apache/hadoop/hbase/ServerName.java b/src/main/java/org/apache/hadoop/hbase/ServerName.java new file mode 100644 index 0000000..6e72402 --- /dev/null +++ b/src/main/java/org/apache/hadoop/hbase/ServerName.java @@ -0,0 +1,205 @@ +/** + * Copyright 2011 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase; + +import java.util.Set; + +import org.apache.hadoop.hbase.util.Addressing; +import org.apache.hadoop.hbase.util.Bytes; + +/** + * Utility handling server names where server names are formatted + * <hostname> '{@link #SERVERNAME_SEPARATOR}' <port> '{@link #SERVERNAME_SEPARATOR}' <startcode>. + * A server name is used to uniquely identify a server instance. The format is + * safe to use in the filesystem and as znode name up in ZooKeeper. + */ +public class ServerName implements Comparable { + /* + * This character is used as separator between server hostname and port and + * its startcode.
+ */ + static final String SERVERNAME_SEPARATOR = ","; + + private final String servername; + private final String hostname; + private final int port; + private final long startcode; + + public ServerName(final String hostname, final int port, final long startcode) { + this.hostname = hostname; + this.port = port; + this.startcode = startcode; + this.servername = getServerName(hostname, port, startcode); + } + + public ServerName(final String serverName) { + this(parseHostname(serverName), parsePort(serverName), + parseStartcode(serverName)); + } + + public ServerName(final String hostAndPort, final long startCode) { + this(Addressing.parseHostname(hostAndPort), + Addressing.parsePort(hostAndPort), startCode); + } + + public static String parseHostname(final String serverName) { + int index = serverName.indexOf(SERVERNAME_SEPARATOR); + return serverName.substring(0, index); + } + + public static int parsePort(final String serverName) { + String [] split = serverName.split(SERVERNAME_SEPARATOR); + return Integer.parseInt(split[1]); + } + + public static long parseStartcode(final String serverName) { + int index = serverName.lastIndexOf(SERVERNAME_SEPARATOR); + return Long.parseLong(serverName.substring(index + 1)); + } + + @Override + public String toString() { + return this.servername; + } + + public String getServername() { + return servername; + } + + public String getHostname() { + return hostname; + } + + public int getPort() { + return port; + } + + public long getStartcode() { + return startcode; + } + + /** + * @param hostName + * @param port + * @param startcode + * @return Server name made of the concatenation of hostname, port and + * startcode formatted as <hostname> ',' <port> ',' <startcode> + */ + public static String getServerName(String hostName, int port, long startcode) { + StringBuilder name = new StringBuilder(hostName); + name.append(SERVERNAME_SEPARATOR); + name.append(port); + name.append(SERVERNAME_SEPARATOR); + name.append(startcode); + 
return name.toString(); + } + + /** + * @param hostAndPort String in form of <hostname> ':' <port> + * @param startcode + * @return Server name made of the concatenation of hostname, port and + * startcode formatted as <hostname> ',' <port> ',' <startcode> + */ + public static synchronized String getServerName(final String hostAndPort, + final long startcode) { + int index = hostAndPort.indexOf(":"); + if (index <= 0) throw new IllegalArgumentException("Expected ':' "); + return getServerName(hostAndPort.substring(0, index), + Integer.parseInt(hostAndPort.substring(index + 1)), startcode); + } + + /** + * @return Hostname and port formatted as described at + * {@link Addressing#createHostAndPortStr(String, int)} + */ + public String getHostAndPort() { + return Addressing.createHostAndPortStr(this.hostname, this.port); + } + + /** + * @return This instance serialized to bytes. + */ + public byte [] getBytes() { + return Bytes.toBytes(toString()); + } + + /** + * @param serverName ServerName in form specified by {@link #getServerName()} + * @return The server start code parsed from servername + */ + public static long getServerStartcodeFromServerName(final String serverName) { + int index = serverName.lastIndexOf(SERVERNAME_SEPARATOR); + return Long.parseLong(serverName.substring(index + 1)); + } + + /** + * Utility method that does a find of a servername or a hostandport combination + * in the passed Set. + * @param servers Set of server names where server name is formatted as + * a {@link ServerName}. + * @return True if serverName found in servers + */ + public boolean isServer(final Set servers) { + return isServer(servers, toString(), false); + } + + /** + * Utility method that does a find of a servername or a hostandport combination + * in the passed Set. + * @param servers Set of server names where server name is formatted as + * a {@link ServerName}. + * @param serverName Name to look for formatted as a {@link ServerName} or + * as a hostname ':' port. 
+ * @param hostAndPortOnly True if passed serverName is a + * hostname ':' port, false if its formatted as a + * {@link ServerName}. + * @return True if serverName found in servers + */ + public static boolean isServer(final Set servers, + final String serverName, final boolean hostAndPortOnly) { + if (!hostAndPortOnly) return servers.contains(serverName); + String serverNameColonReplaced = + serverName.replaceFirst(":", SERVERNAME_SEPARATOR); + for (String hostPortStartCode: servers) { + int index = hostPortStartCode.lastIndexOf(SERVERNAME_SEPARATOR); + String hostPortStrippedOfStartCode = hostPortStartCode.substring(0, index); + if (hostPortStrippedOfStartCode.equals(serverNameColonReplaced)) return true; + } + return false; + } + + @Override + public int compareTo(ServerName other) { + return this.servername.compareTo(other.getServername()); + } + + @Override + public int hashCode() { + return this.servername.hashCode(); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null) return false; + if (!(o instanceof ServerName)) return false; + return this.compareTo((ServerName)o) == 0; + } +} \ No newline at end of file diff --git a/src/main/java/org/apache/hadoop/hbase/avro/AvroUtil.java b/src/main/java/org/apache/hadoop/hbase/avro/AvroUtil.java index 14e8fa1..909f0fb 100644 --- a/src/main/java/org/apache/hadoop/hbase/avro/AvroUtil.java +++ b/src/main/java/org/apache/hadoop/hbase/avro/AvroUtil.java @@ -23,20 +23,16 @@ import java.nio.ByteBuffer; import java.util.Collection; import java.util.List; +import org.apache.avro.Schema; +import org.apache.avro.generic.GenericArray; +import org.apache.avro.generic.GenericData; +import org.apache.avro.util.Utf8; import org.apache.hadoop.hbase.ClusterStatus; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HServerAddress; -import org.apache.hadoop.hbase.HServerInfo; -import org.apache.hadoop.hbase.HServerLoad; import 
org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.client.Delete; -import org.apache.hadoop.hbase.client.Get; -import org.apache.hadoop.hbase.client.Put; -import org.apache.hadoop.hbase.client.Result; -import org.apache.hadoop.hbase.client.Scan; -import org.apache.hadoop.hbase.io.hfile.Compression; -import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.avro.generated.AClusterStatus; import org.apache.hadoop.hbase.avro.generated.AColumn; import org.apache.hadoop.hbase.avro.generated.AColumnValue; @@ -46,7 +42,6 @@ import org.apache.hadoop.hbase.avro.generated.AFamilyDescriptor; import org.apache.hadoop.hbase.avro.generated.AGet; import org.apache.hadoop.hbase.avro.generated.AIllegalArgument; import org.apache.hadoop.hbase.avro.generated.APut; -import org.apache.hadoop.hbase.avro.generated.ARegionLoad; import org.apache.hadoop.hbase.avro.generated.AResult; import org.apache.hadoop.hbase.avro.generated.AResultEntry; import org.apache.hadoop.hbase.avro.generated.AScan; @@ -54,11 +49,13 @@ import org.apache.hadoop.hbase.avro.generated.AServerAddress; import org.apache.hadoop.hbase.avro.generated.AServerInfo; import org.apache.hadoop.hbase.avro.generated.AServerLoad; import org.apache.hadoop.hbase.avro.generated.ATableDescriptor; - -import org.apache.avro.Schema; -import org.apache.avro.generic.GenericArray; -import org.apache.avro.generic.GenericData; -import org.apache.avro.util.Utf8; +import org.apache.hadoop.hbase.client.Delete; +import org.apache.hadoop.hbase.client.Get; +import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.io.hfile.Compression; +import org.apache.hadoop.hbase.util.Bytes; public class AvroUtil { @@ -74,52 +71,14 @@ public class AvroUtil { return asa; } - static public ARegionLoad hrlToARL(HServerLoad.RegionLoad 
rl) throws IOException { - ARegionLoad arl = new ARegionLoad(); - arl.memStoreSizeMB = rl.getMemStoreSizeMB(); - arl.name = ByteBuffer.wrap(rl.getName()); - arl.storefileIndexSizeMB = rl.getStorefileIndexSizeMB(); - arl.storefiles = rl.getStorefiles(); - arl.storefileSizeMB = rl.getStorefileSizeMB(); - arl.stores = rl.getStores(); - return arl; - } - - static public AServerLoad hslToASL(HServerLoad hsl) throws IOException { - AServerLoad asl = new AServerLoad(); - asl.load = hsl.getLoad(); - asl.maxHeapMB = hsl.getMaxHeapMB(); - asl.memStoreSizeInMB = hsl.getMemStoreSizeInMB(); - asl.numberOfRegions = hsl.getNumberOfRegions(); - asl.numberOfRequests = hsl.getNumberOfRequests(); - - Collection regionLoads = hsl.getRegionsLoad(); - Schema s = Schema.createArray(ARegionLoad.SCHEMA$); - GenericData.Array aregionLoads = null; - if (regionLoads != null) { - aregionLoads = new GenericData.Array(regionLoads.size(), s); - for (HServerLoad.RegionLoad rl : regionLoads) { - aregionLoads.add(hrlToARL(rl)); - } - } else { - aregionLoads = new GenericData.Array(0, s); - } - asl.regionsLoad = aregionLoads; - - asl.storefileIndexSizeInMB = hsl.getStorefileIndexSizeInMB(); - asl.storefiles = hsl.getStorefiles(); - asl.storefileSizeInMB = hsl.getStorefileSizeInMB(); - asl.usedHeapMB = hsl.getUsedHeapMB(); - return asl; - } - - static public AServerInfo hsiToASI(HServerInfo hsi) throws IOException { + static public AServerInfo hsiToASI(ServerName hsi) throws IOException { AServerInfo asi = new AServerInfo(); - asi.infoPort = hsi.getInfoPort(); - asi.load = hslToASL(hsi.getLoad()); - asi.serverAddress = hsaToASA(hsi.getServerAddress()); - asi.serverName = new Utf8(hsi.getServerName()); - asi.startCode = hsi.getStartCode(); + asi.infoPort = -1; + asi.load = new AServerLoad(); + asi.serverAddress = + hsaToASA(new HServerAddress(hsi.getHostname(), hsi.getPort())); + asi.serverName = new Utf8(hsi.toString()); + asi.startCode = hsi.getStartcode(); return asi; } @@ -142,19 +101,19 @@ public 
class AvroUtil { acs.hbaseVersion = new Utf8(cs.getHBaseVersion()); acs.regionsCount = cs.getRegionsCount(); acs.requestsCount = cs.getRequestsCount(); - Collection hserverInfos = cs.getServerInfo(); + Collection hserverInfos = cs.getServers(); Schema s = Schema.createArray(AServerInfo.SCHEMA$); GenericData.Array aserverInfos = null; if (hserverInfos != null) { aserverInfos = new GenericData.Array(hserverInfos.size(), s); - for (HServerInfo hsi : hserverInfos) { + for (ServerName hsi : hserverInfos) { aserverInfos.add(hsiToASI(hsi)); } } else { aserverInfos = new GenericData.Array(0, s); } acs.serverInfos = aserverInfos; - acs.servers = cs.getServers(); + acs.servers = cs.getServersSize(); return acs; } diff --git a/src/main/java/org/apache/hadoop/hbase/catalog/CatalogTracker.java b/src/main/java/org/apache/hadoop/hbase/catalog/CatalogTracker.java index b291936..6338b12 100644 --- a/src/main/java/org/apache/hadoop/hbase/catalog/CatalogTracker.java +++ b/src/main/java/org/apache/hadoop/hbase/catalog/CatalogTracker.java @@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HServerAddress; import org.apache.hadoop.hbase.NotAllMetaRegionsOnlineException; import org.apache.hadoop.hbase.NotServingRegionException; +import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.client.HConnection; import org.apache.hadoop.hbase.client.RetriesExhaustedException; import org.apache.hadoop.hbase.ipc.HRegionInterface; @@ -67,7 +68,7 @@ public class CatalogTracker { * server shutdown processing -- we need to know who had .META. last. If you * want to know if the address is good, rely on {@link #metaAvailable} value. */ - private HServerAddress metaLocation; + private ServerName metaLocation; private final int defaultTimeout; private boolean stopped = false; @@ -154,17 +155,18 @@ public class CatalogTracker { /** * Gets the current location for -ROOT- or null if location is * not currently available. 
- * @return location of root, null if not available + * @return server name * @throws InterruptedException */ - public HServerAddress getRootLocation() throws InterruptedException { + public ServerName getRootLocation() throws InterruptedException { return this.rootRegionTracker.getRootRegionLocation(); } /** - * @return Location of meta or null if not yet available. + * @return Location of server hosting meta region formatted as per + * {@link ServerName}, or null if none available */ - public HServerAddress getMetaLocation() { + public ServerName getMetaLocation() { return this.metaLocation; } @@ -183,18 +185,19 @@ public class CatalogTracker { * for up to the specified timeout if not immediately available. Returns null * if the timeout elapses before root is available. * @param timeout maximum time to wait for root availability, in milliseconds - * @return location of root + * @return Location of server hosting root region, + * or null if none available * @throws InterruptedException if interrupted while waiting * @throws NotAllMetaRegionsOnlineException if root not available before * timeout */ - HServerAddress waitForRoot(final long timeout) + ServerName waitForRoot(final long timeout) throws InterruptedException, NotAllMetaRegionsOnlineException { - HServerAddress address = rootRegionTracker.waitRootRegionLocation(timeout); - if (address == null) { + ServerName sn = rootRegionTracker.waitRootRegionLocation(timeout); + if (sn == null) { throw new NotAllMetaRegionsOnlineException("Timed out; " + timeout + "ms"); } - return address; + return sn; } /** @@ -237,11 +240,11 @@ public class CatalogTracker { */ private HRegionInterface getRootServerConnection() throws IOException, InterruptedException { - HServerAddress address = this.rootRegionTracker.getRootRegionLocation(); - if (address == null) { + ServerName sn = this.rootRegionTracker.getRootRegionLocation(); + if (sn == null) { return null; } - return getCachedConnection(address); + return 
getCachedConnection(sn); } /** @@ -277,7 +280,7 @@ public class CatalogTracker { if (rootConnection == null) { return null; } - HServerAddress newLocation = MetaReader.readMetaLocation(rootConnection); + ServerName newLocation = MetaReader.readMetaLocation(rootConnection); if (newLocation == null) { return null; } @@ -316,7 +319,7 @@ public class CatalogTracker { * @throws NotAllMetaRegionsOnlineException if meta not available before * timeout */ - public HServerAddress waitForMeta(long timeout) + public ServerName waitForMeta(long timeout) throws InterruptedException, IOException, NotAllMetaRegionsOnlineException { long stop = System.currentTimeMillis() + timeout; synchronized (metaAvailable) { @@ -371,18 +374,21 @@ public class CatalogTracker { this.metaAvailable.set(false); } - private void setMetaLocation(HServerAddress metaLocation) { + private void setMetaLocation(final ServerName metaLocation) { metaAvailable.set(true); this.metaLocation = metaLocation; // no synchronization because these are private and already under lock - metaAvailable.notifyAll(); + this.metaAvailable.notifyAll(); } - private HRegionInterface getCachedConnection(HServerAddress address) + private HRegionInterface getCachedConnection(ServerName sn) throws IOException { HRegionInterface protocol = null; try { - protocol = connection.getHRegionConnection(address, false); + // TODO: Remove. Its silly making an HSA. Just pass host and port and + // let connection figure it out. + HServerAddress hsa = new HServerAddress(sn.getHostname(), sn.getPort()); + protocol = connection.getHRegionConnection(hsa, false); } catch (RetriesExhaustedException e) { if (e.getCause() != null && e.getCause() instanceof ConnectException) { // Catch this; presume it means the cached connection has gone bad. @@ -391,7 +397,7 @@ public class CatalogTracker { } } catch (SocketTimeoutException e) { // We were passed the wrong address. Return 'protocol' == null. 
- LOG.debug("Timed out connecting to " + address); + LOG.debug("Timed out connecting to " + sn); } catch (IOException ioe) { Throwable cause = ioe.getCause(); if (cause != null && cause instanceof EOFException) { @@ -408,7 +414,7 @@ public class CatalogTracker { } private boolean verifyRegionLocation(HRegionInterface metaServer, - final HServerAddress address, + final ServerName address, byte [] regionName) throws IOException { if (metaServer == null) { @@ -465,7 +471,8 @@ public class CatalogTracker { throw e; } return (connection == null)? false: - verifyRegionLocation(connection,this.rootRegionTracker.getRootRegionLocation(), + verifyRegionLocation(connection, + this.rootRegionTracker.getRootRegionLocation(), HRegionInfo.ROOT_REGIONINFO.getRegionName()); } @@ -489,4 +496,4 @@ public class CatalogTracker { public HConnection getConnection() { return this.connection; } -} +} \ No newline at end of file diff --git a/src/main/java/org/apache/hadoop/hbase/catalog/MetaEditor.java b/src/main/java/org/apache/hadoop/hbase/catalog/MetaEditor.java index c2ee031..5d61607 100644 --- a/src/main/java/org/apache/hadoop/hbase/catalog/MetaEditor.java +++ b/src/main/java/org/apache/hadoop/hbase/catalog/MetaEditor.java @@ -26,8 +26,8 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.HServerInfo; import org.apache.hadoop.hbase.NotAllMetaRegionsOnlineException; +import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.ipc.HRegionInterface; @@ -87,18 +87,17 @@ public class MetaEditor { } public static void addDaughter(final CatalogTracker catalogTracker, - final HRegionInfo regionInfo, final HServerInfo serverInfo) + final HRegionInfo regionInfo, final ServerName sn) throws NotAllMetaRegionsOnlineException, IOException { 
HRegionInterface server = catalogTracker.waitForMetaServerConnectionDefault(); byte [] catalogRegionName = CatalogTracker.META_REGION; Put put = new Put(regionInfo.getRegionName()); addRegionInfo(put, regionInfo); - if (serverInfo != null) addLocation(put, serverInfo); + if (sn != null) addLocation(put, sn); server.put(catalogRegionName, put); LOG.info("Added daughter " + regionInfo.getRegionNameAsString() + " in region " + Bytes.toString(catalogRegionName) + - (serverInfo == null? - ", serverInfo=null": ", serverInfo=" + serverInfo.getServerName())); + (sn == null? ", serverName=null": ", serverName=" + sn.toString())); } /** @@ -110,18 +109,18 @@ public class MetaEditor { * * @param catalogTracker catalog tracker * @param regionInfo region to update location of - * @param serverInfo server the region is located on + * @param sn Server name * @throws IOException * @throws ConnectException Usually because the regionserver carrying .META. * is down. * @throws NullPointerException Because no -ROOT- server connection */ public static void updateMetaLocation(CatalogTracker catalogTracker, - HRegionInfo regionInfo, HServerInfo serverInfo) + HRegionInfo regionInfo, ServerName sn) throws IOException, ConnectException { HRegionInterface server = catalogTracker.waitForRootServerConnectionDefault(); if (server == null) throw new IOException("No server for -ROOT-"); - updateLocation(server, CatalogTracker.ROOT_REGION, regionInfo, serverInfo); + updateLocation(server, CatalogTracker.ROOT_REGION, regionInfo, sn); } /** @@ -133,14 +132,14 @@ public class MetaEditor { * * @param catalogTracker catalog tracker * @param regionInfo region to update location of - * @param serverInfo server the region is located on + * @param sn Server name * @throws IOException */ public static void updateRegionLocation(CatalogTracker catalogTracker, - HRegionInfo regionInfo, HServerInfo serverInfo) + HRegionInfo regionInfo, ServerName sn) throws IOException { 
updateLocation(catalogTracker.waitForMetaServerConnectionDefault(), - CatalogTracker.META_REGION, regionInfo, serverInfo); + CatalogTracker.META_REGION, regionInfo, sn); } /** @@ -152,20 +151,19 @@ public class MetaEditor { * @param server connection to server hosting catalog region * @param catalogRegionName name of catalog region being updated * @param regionInfo region to update location of - * @param serverInfo server the region is located on + * @param sn Server name * @throws IOException In particular could throw {@link java.net.ConnectException} * if the server is down on other end. */ private static void updateLocation(HRegionInterface server, - byte [] catalogRegionName, HRegionInfo regionInfo, HServerInfo serverInfo) + byte [] catalogRegionName, HRegionInfo regionInfo, ServerName sn) throws IOException { Put put = new Put(regionInfo.getRegionName()); - addLocation(put, serverInfo); + addLocation(put, sn); server.put(catalogRegionName, put); LOG.info("Updated row " + regionInfo.getRegionNameAsString() + " in region " + Bytes.toString(catalogRegionName) + " with " + - "server=" + serverInfo.getHostnamePort() + ", " + - "startcode=" + serverInfo.getStartCode()); + "serverName=" + sn.toString()); } /** @@ -228,11 +226,11 @@ public class MetaEditor { return p; } - private static Put addLocation(final Put p, final HServerInfo hsi) { + private static Put addLocation(final Put p, final ServerName sn) { p.add(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER, - Bytes.toBytes(hsi.getHostnamePort())); + Bytes.toBytes(sn.getHostAndPort())); p.add(HConstants.CATALOG_FAMILY, HConstants.STARTCODE_QUALIFIER, - Bytes.toBytes(hsi.getStartCode())); + Bytes.toBytes(sn.getStartcode())); return p; } } diff --git a/src/main/java/org/apache/hadoop/hbase/catalog/MetaReader.java b/src/main/java/org/apache/hadoop/hbase/catalog/MetaReader.java index efa677b..fe00136 100644 --- a/src/main/java/org/apache/hadoop/hbase/catalog/MetaReader.java +++ 
b/src/main/java/org/apache/hadoop/hbase/catalog/MetaReader.java @@ -30,11 +30,10 @@ import java.util.TreeSet; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.HServerAddress; -import org.apache.hadoop.hbase.HServerInfo; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.NotAllMetaRegionsOnlineException; import org.apache.hadoop.hbase.NotServingRegionException; +import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Scan; @@ -125,10 +124,11 @@ public class MetaReader { * to META. If the region does not have an assignment it will have a null * value in the map. * - * @return map of regions to their currently assigned server + * @return map of regions to their currently assigned server where server is + * a String of <host> ':' <port> * @throws IOException */ - public static Map fullScan( + public static Map fullScan( CatalogTracker catalogTracker) throws IOException { return fullScan(catalogTracker, new TreeSet()); @@ -147,7 +147,7 @@ public class MetaReader { * @return map of regions to their currently assigned server * @throws IOException */ - public static Map fullScan( + public static Map fullScan( CatalogTracker catalogTracker, final Set disabledTables) throws IOException { return fullScan(catalogTracker, disabledTables, false); @@ -168,17 +168,17 @@ public class MetaReader { * @return map of regions to their currently assigned server * @throws IOException */ - public static Map fullScan( + public static Map fullScan( CatalogTracker catalogTracker, final Set disabledTables, final boolean excludeOfflinedSplitParents) throws IOException { - final Map regions = - new TreeMap(); + final Map regions = + new TreeMap(); Visitor v = new Visitor() { @Override public boolean visit(Result r) throws IOException { if (r == null || r.isEmpty()) return true; - Pair 
region = metaRowToRegionPair(r); + Pair region = metaRowToRegionPair(r); if (region == null) return true; HRegionInfo hri = region.getFirst(); if (disabledTables.contains( @@ -199,8 +199,6 @@ public class MetaReader { * Returns a map of every region to it's currently assigned server, according * to META. If the region does not have an assignment it will have a null * value in the map. - *

- * Returns HServerInfo which includes server startcode. * * @return map of regions to their currently assigned server * @throws IOException @@ -273,10 +271,10 @@ public class MetaReader { /** * Reads the location of META from ROOT. * @param metaServer connection to server hosting ROOT - * @return location of META in ROOT, null if not available + * @return location of META in ROOT where location, or null if not available * @throws IOException */ - public static HServerAddress readMetaLocation(HRegionInterface metaServer) + public static ServerName readMetaLocation(HRegionInterface metaServer) throws IOException { return readLocation(metaServer, CatalogTracker.ROOT_REGION, CatalogTracker.META_REGION); @@ -286,10 +284,10 @@ public class MetaReader { * Reads the location of the specified region from META. * @param catalogTracker * @param regionName region to read location of - * @return location of region in META, null if not available + * @return location of META in ROOT where location is, or null if not available * @throws IOException */ - public static HServerAddress readRegionLocation(CatalogTracker catalogTracker, + public static ServerName readRegionLocation(CatalogTracker catalogTracker, byte [] regionName) throws IOException { if (isMetaRegion(regionName)) throw new IllegalArgumentException("See readMetaLocation"); @@ -297,14 +295,17 @@ public class MetaReader { CatalogTracker.META_REGION, regionName); } - private static HServerAddress readLocation(HRegionInterface metaServer, + private static ServerName readLocation(HRegionInterface metaServer, byte [] catalogRegionName, byte [] regionName) throws IOException { Result r = null; try { r = metaServer.get(catalogRegionName, - new Get(regionName).addColumn(HConstants.CATALOG_FAMILY, - HConstants.SERVER_QUALIFIER)); + new Get(regionName). + addColumn(HConstants.CATALOG_FAMILY, + HConstants.SERVER_QUALIFIER). 
+ addColumn(HConstants.CATALOG_FAMILY, + HConstants.STARTCODE_QUALIFIER)); } catch (java.net.SocketTimeoutException e) { // Treat this exception + message as unavailable catalog table. Catch it // and fall through to return a null @@ -339,78 +340,57 @@ public class MetaReader { if (r == null || r.isEmpty()) { return null; } - byte [] value = r.getValue(HConstants.CATALOG_FAMILY, - HConstants.SERVER_QUALIFIER); - return new HServerAddress(Bytes.toString(value)); + return getServerNameFromResult(r); } /** * Gets the region info and assignment for the specified region from META. * @param catalogTracker * @param regionName - * @return region info and assignment from META, null if not available + * @return location of META in ROOT where location is + * a String of <host> ':' <port>, or null if not available * @throws IOException */ - public static Pair getRegion( + public static Pair getRegion( CatalogTracker catalogTracker, byte [] regionName) throws IOException { Get get = new Get(regionName); get.addFamily(HConstants.CATALOG_FAMILY); byte [] meta = getCatalogRegionNameForRegion(regionName); Result r = catalogTracker.waitForMetaServerConnectionDefault().get(meta, get); - if(r == null || r.isEmpty()) { - return null; - } - return metaRowToRegionPair(r); + return (r == null || r.isEmpty())? null: metaRowToRegionPair(r); } /** * @param data A .META. table row. - * @return A pair of the regioninfo and the server address from data - * or null for server address if no address set in .META. or null for a result - * if no HRegionInfo found. + * @return A pair of the regioninfo and the ServerName + * (or null for server address if no address set in .META.). 
* @throws IOException */ - public static Pair metaRowToRegionPair( - Result data) throws IOException { - byte [] bytes = - data.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER); + public static Pair metaRowToRegionPair(Result data) + throws IOException { + byte [] bytes = data.getValue(HConstants.CATALOG_FAMILY, + HConstants.REGIONINFO_QUALIFIER); if (bytes == null) return null; HRegionInfo info = Writables.getHRegionInfo(bytes); - final byte[] value = data.getValue(HConstants.CATALOG_FAMILY, - HConstants.SERVER_QUALIFIER); - if (value != null && value.length > 0) { - HServerAddress server = new HServerAddress(Bytes.toString(value)); - return new Pair(info, server); - } else { - return new Pair(info, null); - } + ServerName sn = getServerNameFromResult(data); + // sn can be null in case where no server inof. + return new Pair(info, sn); } /** - * @param data A .META. table row. - * @return A pair of the regioninfo and the server info from data - * (or null for server address if no address set in .META.). - * @throws IOException + * @param data Result to interrogate. + * @return A ServerName instance or null if necessary fields not found or empty. 
*/ - public static Pair metaRowToRegionPairWithInfo( - Result data) throws IOException { - byte [] bytes = data.getValue(HConstants.CATALOG_FAMILY, - HConstants.REGIONINFO_QUALIFIER); - if (bytes == null) return null; - HRegionInfo info = Writables.getHRegionInfo(bytes); - final byte[] value = data.getValue(HConstants.CATALOG_FAMILY, + private static ServerName getServerNameFromResult(final Result data) { + byte[] value = data.getValue(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER); - if (value != null && value.length > 0) { - final long startCode = Bytes.toLong(data.getValue(HConstants.CATALOG_FAMILY, - HConstants.STARTCODE_QUALIFIER)); - HServerAddress server = new HServerAddress(Bytes.toString(value)); - HServerInfo hsi = new HServerInfo(server, startCode, 0, - server.getHostname()); - return new Pair(info, hsi); - } else { - return new Pair(info, null); - } + if (value == null || value.length == 0) return null; + String hostAndPort = Bytes.toString(value); + value = data.getValue(HConstants.CATALOG_FAMILY, + HConstants.STARTCODE_QUALIFIER); + if (value == null || value.length == 0) return null; + return new ServerName(hostAndPort, Bytes.toLong(value)); } /** @@ -525,26 +505,27 @@ public class MetaReader { /** * @param catalogTracker * @param tableName - * @return Return list of regioninfos and server addresses. + * @return Return list of regioninfos and server. * @throws IOException * @throws InterruptedException */ - public static List> + public static List> getTableRegionsAndLocations(CatalogTracker catalogTracker, String tableName) throws IOException, InterruptedException { byte [] tableNameBytes = Bytes.toBytes(tableName); if (Bytes.equals(tableNameBytes, HConstants.ROOT_TABLE_NAME)) { // If root, do a bit of special handling. 
- HServerAddress hsa = catalogTracker.getRootLocation(); - List> list = - new ArrayList>(); - list.add(new Pair(HRegionInfo.ROOT_REGIONINFO, hsa)); + ServerName serverName = catalogTracker.getRootLocation(); + List> list = + new ArrayList>(); + list.add(new Pair(HRegionInfo.ROOT_REGIONINFO, + serverName)); return list; } HRegionInterface metaServer = getCatalogRegionInterface(catalogTracker, tableNameBytes); - List> regions = - new ArrayList>(); + List> regions = + new ArrayList>(); byte[] firstRowInTable = Bytes.toBytes(tableName + ",,"); Scan scan = new Scan(firstRowInTable); scan.addFamily(HConstants.CATALOG_FAMILY); @@ -554,7 +535,7 @@ public class MetaReader { Result data; while((data = metaServer.next(scannerid)) != null) { if (data != null && data.size() > 0) { - Pair region = metaRowToRegionPair(data); + Pair region = metaRowToRegionPair(data); if (region == null) continue; if (region.getFirst().getTableDesc().getNameAsString().equals( tableName)) { @@ -578,7 +559,7 @@ public class MetaReader { * @throws IOException */ public static NavigableMap - getServerUserRegions(CatalogTracker catalogTracker, final HServerInfo hsi) + getServerUserRegions(CatalogTracker catalogTracker, final ServerName serverName) throws IOException { HRegionInterface metaServer = catalogTracker.waitForMetaServerConnectionDefault(); @@ -591,10 +572,9 @@ public class MetaReader { Result result; while((result = metaServer.next(scannerid)) != null) { if (result != null && result.size() > 0) { - Pair pair = - metaRowToRegionPairWithInfo(result); + Pair pair = metaRowToRegionPair(result); if (pair == null) continue; - if (pair.getSecond() == null || !pair.getSecond().equals(hsi)) { + if (pair.getSecond() == null || !serverName.equals(pair.getSecond())) { continue; } hris.put(pair.getFirst(), result); diff --git a/src/main/java/org/apache/hadoop/hbase/catalog/RootLocationEditor.java b/src/main/java/org/apache/hadoop/hbase/catalog/RootLocationEditor.java index aee64c5..1cbf1b6 100644 --- 
a/src/main/java/org/apache/hadoop/hbase/catalog/RootLocationEditor.java +++ b/src/main/java/org/apache/hadoop/hbase/catalog/RootLocationEditor.java @@ -21,7 +21,7 @@ package org.apache.hadoop.hbase.catalog; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.HServerAddress; +import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.zookeeper.ZKUtil; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; @@ -53,11 +53,11 @@ public class RootLocationEditor { * Sets the location of -ROOT- in ZooKeeper to the * specified server address. * @param zookeeper zookeeper reference - * @param location server address hosting root + * @param location The server hosting -ROOT- * @throws KeeperException unexpected zookeeper exception */ public static void setRootLocation(ZooKeeperWatcher zookeeper, - HServerAddress location) + final ServerName location) throws KeeperException { LOG.info("Setting ROOT region location in ZooKeeper as " + location); try { @@ -69,4 +69,4 @@ public class RootLocationEditor { Bytes.toBytes(location.toString())); } } -} +} \ No newline at end of file diff --git a/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java index 82993a7..c37f67b 100644 --- a/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java +++ b/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java @@ -40,6 +40,7 @@ import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.MasterNotRunningException; import org.apache.hadoop.hbase.RegionException; import org.apache.hadoop.hbase.RemoteExceptionHandler; +import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableExistsException; import org.apache.hadoop.hbase.UnknownRegionException; import org.apache.hadoop.hbase.ZooKeeperConnectionException; @@ -781,18 +782,15 @@ public class HBaseAdmin 
implements Abortable { CatalogTracker ct = getCatalogTracker(); try { if (hostAndPort != null) { - HServerAddress hsa = new HServerAddress(hostAndPort); - Pair pair = - MetaReader.getRegion(ct, regionname); + Pair pair = MetaReader.getRegion(ct, regionname); if (pair == null || pair.getSecond() == null) { LOG.info("No server in .META. for " + Bytes.toString(regionname) + "; pair=" + pair); } else { - closeRegion(hsa, pair.getFirst()); + closeRegion(pair.getSecond(), pair.getFirst()); } } else { - Pair pair = - MetaReader.getRegion(ct, regionname); + Pair pair = MetaReader.getRegion(ct, regionname); if (pair == null || pair.getSecond() == null) { LOG.info("No server in .META. for " + Bytes.toString(regionname) + "; pair=" + pair); @@ -805,8 +803,9 @@ public class HBaseAdmin implements Abortable { } } - private void closeRegion(final HServerAddress hsa, final HRegionInfo hri) + private void closeRegion(final ServerName sn, final HRegionInfo hri) throws IOException { + HServerAddress hsa = new HServerAddress(sn.getHostname(), sn.getPort()); HRegionInterface rs = this.connection.getHRegionConnection(hsa); // Close the region without updating zk state. rs.closeRegion(hri, false); @@ -839,7 +838,7 @@ public class HBaseAdmin implements Abortable { CatalogTracker ct = getCatalogTracker(); try { if (isRegionName) { - Pair pair = + Pair pair = MetaReader.getRegion(getCatalogTracker(), tableNameOrRegionName); if (pair == null || pair.getSecond() == null) { LOG.info("No server in .META. 
for " + @@ -848,10 +847,10 @@ public class HBaseAdmin implements Abortable { flush(pair.getSecond(), pair.getFirst()); } } else { - List> pairs = + List> pairs = MetaReader.getTableRegionsAndLocations(getCatalogTracker(), Bytes.toString(tableNameOrRegionName)); - for (Pair pair: pairs) { + for (Pair pair: pairs) { if (pair.getSecond() == null) continue; flush(pair.getSecond(), pair.getFirst()); } @@ -861,8 +860,9 @@ public class HBaseAdmin implements Abortable { } } - private void flush(final HServerAddress hsa, final HRegionInfo hri) + private void flush(final ServerName sn, final HRegionInfo hri) throws IOException { + HServerAddress hsa = new HServerAddress(sn.getHostname(), sn.getPort()); HRegionInterface rs = this.connection.getHRegionConnection(hsa); rs.flushRegion(hri); } @@ -933,7 +933,7 @@ public class HBaseAdmin implements Abortable { CatalogTracker ct = getCatalogTracker(); try { if (isRegionName(tableNameOrRegionName)) { - Pair pair = + Pair pair = MetaReader.getRegion(ct, tableNameOrRegionName); if (pair == null || pair.getSecond() == null) { LOG.info("No server in .META. 
for " + @@ -942,10 +942,10 @@ public class HBaseAdmin implements Abortable { compact(pair.getSecond(), pair.getFirst(), major); } } else { - List> pairs = + List> pairs = MetaReader.getTableRegionsAndLocations(ct, Bytes.toString(tableNameOrRegionName)); - for (Pair pair: pairs) { + for (Pair pair: pairs) { if (pair.getSecond() == null) continue; compact(pair.getSecond(), pair.getFirst(), major); } @@ -955,9 +955,10 @@ public class HBaseAdmin implements Abortable { } } - private void compact(final HServerAddress hsa, final HRegionInfo hri, + private void compact(final ServerName sn, final HRegionInfo hri, final boolean major) throws IOException { + HServerAddress hsa = new HServerAddress(sn.getHostname(), sn.getPort()); HRegionInterface rs = this.connection.getHRegionConnection(hsa); rs.compactRegion(hri, major); } @@ -971,7 +972,7 @@ public class HBaseAdmin implements Abortable { * @param destServerName The servername of the destination regionserver. If * passed the empty byte array we'll assign to a random server. A server name * is made of host, port and startcode. Here is an example: - * host187.example.com,60020,1289493121758. + * host187.example.com,60020,1289493121758 * @throws UnknownRegionException Thrown if we can't find a region named * encodedRegionName * @throws ZooKeeperConnectionException @@ -1079,7 +1080,7 @@ public class HBaseAdmin implements Abortable { try { if (isRegionName(tableNameOrRegionName)) { // Its a possible region name. - Pair pair = + Pair pair = MetaReader.getRegion(getCatalogTracker(), tableNameOrRegionName); if (pair == null || pair.getSecond() == null) { LOG.info("No server in .META. 
for " + @@ -1088,10 +1089,10 @@ public class HBaseAdmin implements Abortable { split(pair.getSecond(), pair.getFirst(), splitPoint); } } else { - List> pairs = + List> pairs = MetaReader.getTableRegionsAndLocations(getCatalogTracker(), Bytes.toString(tableNameOrRegionName)); - for (Pair pair: pairs) { + for (Pair pair: pairs) { // May not be a server for a particular row if (pair.getSecond() == null) continue; HRegionInfo r = pair.getFirst(); @@ -1108,8 +1109,9 @@ public class HBaseAdmin implements Abortable { } } - private void split(final HServerAddress hsa, final HRegionInfo hri, + private void split(final ServerName sn, final HRegionInfo hri, byte[] splitPoint) throws IOException { + HServerAddress hsa = new HServerAddress(sn.getHostname(), sn.getPort()); HRegionInterface rs = this.connection.getHRegionConnection(hsa); rs.splitRegion(hri, splitPoint); } diff --git a/src/main/java/org/apache/hadoop/hbase/client/HConnection.java b/src/main/java/org/apache/hadoop/hbase/client/HConnection.java index d8a2fc3..581ca0a 100644 --- a/src/main/java/org/apache/hadoop/hbase/client/HConnection.java +++ b/src/main/java/org/apache/hadoop/hbase/client/HConnection.java @@ -199,8 +199,8 @@ public interface HConnection extends Abortable { * @return proxy for HRegionServer * @throws IOException if a remote or network exception occurs */ - public HRegionInterface getHRegionConnection( - HServerAddress regionServer, boolean getMaster) + public HRegionInterface getHRegionConnection(HServerAddress regionServer, + boolean getMaster) throws IOException; /** diff --git a/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java b/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java index 1456b71..de200f3 100644 --- a/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java +++ b/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java @@ -22,8 +22,16 @@ package org.apache.hadoop.hbase.client; import java.io.IOException; import 
java.lang.reflect.Proxy; import java.lang.reflect.UndeclaredThrowableException; -import java.util.*; +import java.net.InetSocketAddress; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; import java.util.Map.Entry; +import java.util.Set; +import java.util.TreeMap; +import java.util.TreeSet; import java.util.concurrent.Callable; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.CopyOnWriteArraySet; @@ -45,11 +53,17 @@ import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.MasterAddressTracker; import org.apache.hadoop.hbase.MasterNotRunningException; import org.apache.hadoop.hbase.RemoteExceptionHandler; +import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.ZooKeeperConnectionException; import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitor; import org.apache.hadoop.hbase.client.coprocessor.Batch; -import org.apache.hadoop.hbase.ipc.*; +import org.apache.hadoop.hbase.ipc.CoprocessorProtocol; +import org.apache.hadoop.hbase.ipc.ExecRPCInvoker; +import org.apache.hadoop.hbase.ipc.HBaseRPC; +import org.apache.hadoop.hbase.ipc.HMasterInterface; +import org.apache.hadoop.hbase.ipc.HRegionInterface; +import org.apache.hadoop.hbase.util.Addressing; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.util.SoftValueSortedMap; @@ -333,7 +347,7 @@ public class HConnectionManager { } } - HServerAddress masterLocation = null; + ServerName sn = null; synchronized (this.masterLock) { for (int tries = 0; !this.closed && @@ -342,15 +356,16 @@ public class HConnectionManager { tries++) { try { - masterLocation = masterAddressTracker.getMasterAddress(); - if(masterLocation == null) { + sn = masterAddressTracker.getMasterAddress(); + if (sn == null) { LOG.info("ZooKeeper available but no active master 
location found"); throw new MasterNotRunningException(); } - + InetSocketAddress isa = + new InetSocketAddress(sn.getHostname(), sn.getPort()); HMasterInterface tryMaster = (HMasterInterface)HBaseRPC.getProxy( - HMasterInterface.class, HMasterInterface.VERSION, - masterLocation.getInetSocketAddress(), this.conf, this.rpcTimeout); + HMasterInterface.class, HMasterInterface.VERSION, isa, this.conf, + this.rpcTimeout); if (tryMaster.isMasterRunning()) { this.master = tryMaster; @@ -381,10 +396,10 @@ public class HConnectionManager { this.masterChecked = true; } if (this.master == null) { - if (masterLocation == null) { + if (sn == null) { throw new MasterNotRunningException(); } - throw new MasterNotRunningException(masterLocation.toString()); + throw new MasterNotRunningException(sn.toString()); } return this.master; } @@ -565,12 +580,13 @@ public class HConnectionManager { if (Bytes.equals(tableName, HConstants.ROOT_TABLE_NAME)) { try { - HServerAddress hsa = + ServerName servername = this.rootRegionTracker.waitRootRegionLocation(this.rpcTimeout); LOG.debug("Lookedup root region location, connection=" + this + - "; hsa=" + hsa); - if (hsa == null) return null; - return new HRegionLocation(HRegionInfo.ROOT_REGIONINFO, hsa); + "; serverName=" + ((servername == null)? 
"": servername.toString())); + if (servername == null) return null; + return new HRegionLocation(HRegionInfo.ROOT_REGIONINFO, + new HServerAddress(servername.getHostname(), servername.getPort())); } catch (InterruptedException e) { Thread.currentThread().interrupt(); return null; @@ -619,11 +635,12 @@ public class HConnectionManager { if (value == null) { return true; // don't cache it } - final String serverAddress = Bytes.toString(value); - + final String hostAndPort = Bytes.toString(value); + String hostname = Addressing.parseHostname(hostAndPort); + int port = Addressing.parsePort(hostAndPort); // instantiate the location HRegionLocation loc = new HRegionLocation(regionInfo, - new HServerAddress(serverAddress)); + new HServerAddress(hostname, port)); // cache this meta entry cacheLocation(tableName, loc); } @@ -677,8 +694,7 @@ public class HConnectionManager { metaLocation = locateRegion(parentTable, metaKey); // If null still, go around again. if (metaLocation == null) continue; - HRegionInterface server = - getHRegionConnection(metaLocation.getServerAddress()); + HRegionInterface server = getHRegionConnection(metaLocation.getServerAddress()); Result regionInfoRow = null; // This block guards against two threads trying to load the meta @@ -713,7 +729,7 @@ public class HConnectionManager { if (regionInfoRow == null) { throw new TableNotFoundException(Bytes.toString(tableName)); } - byte[] value = regionInfoRow.getValue(HConstants.CATALOG_FAMILY, + byte [] value = regionInfoRow.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER); if (value == null || value.length == 0) { throw new IOException("HRegionInfo was null or empty in " + @@ -734,19 +750,20 @@ public class HConnectionManager { value = regionInfoRow.getValue(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER); - String serverAddress = ""; - if(value != null) { - serverAddress = Bytes.toString(value); + String hostAndPort = ""; + if (value != null) { + hostAndPort = 
Bytes.toString(value); } - if (serverAddress.equals("")) { + if (hostAndPort.equals("")) { throw new NoServerForRegionException("No server address listed " + "in " + Bytes.toString(parentTable) + " for region " + regionInfo.getRegionNameAsString()); } - // instantiate the location - location = new HRegionLocation(regionInfo, - new HServerAddress(serverAddress)); + // Instantiate the location + String hostname = Addressing.parseHostname(hostAndPort); + int port = Addressing.parsePort(hostAndPort); + location = new HRegionLocation(regionInfo, new HServerAddress(hostname, port)); cacheLocation(tableName, location); return location; } catch (TableNotFoundException e) { @@ -932,39 +949,44 @@ public class HConnectionManager { } } - public HRegionInterface getHRegionConnection( - HServerAddress regionServer, boolean getMaster) + public HRegionInterface getHRegionConnection(HServerAddress hsa) throws IOException { - if (getMaster) { - getMaster(); - } + return getHRegionConnection(hsa, false); + } + + public HRegionInterface getHRegionConnection(HServerAddress hsa, boolean master) + throws IOException { + return getHRegionConnection(hsa.getInetSocketAddress(), master); + } + + // TODO: CHange getHRegionConnection to take InetSocketAddress and not + // HSA -- St.Ack + + public HRegionInterface getHRegionConnection(final InetSocketAddress isa, + final boolean master) + throws IOException { + if (master) getMaster(); HRegionInterface server; synchronized (this.servers) { // See if we already have a connection - server = this.servers.get(regionServer.toString()); + server = this.servers.get(isa.toString()); if (server == null) { // Get a connection try { server = (HRegionInterface)HBaseRPC.waitForProxy( serverInterfaceClass, HRegionInterface.VERSION, - regionServer.getInetSocketAddress(), this.conf, + isa, this.conf, this.maxRPCAttempts, this.rpcTimeout, this.rpcTimeout); } catch (RemoteException e) { LOG.warn("RemoteException connecting to RS", e); // Throw what the 
RemoteException was carrying. throw RemoteExceptionHandler.decodeRemoteException(e); } - this.servers.put(regionServer.toString(), server); + this.servers.put(isa.toString(), server); } } return server; } - public HRegionInterface getHRegionConnection( - HServerAddress regionServer) - throws IOException { - return getHRegionConnection(regionServer, false); - } - /** * Get the ZooKeeper instance for this TableServers instance. * @@ -1193,17 +1215,17 @@ public class HConnectionManager { Row row = workingList.get(i); if (row != null) { HRegionLocation loc = locateRegion(tableName, row.getRow(), true); - HServerAddress address = loc.getServerAddress(); + HServerAddress a = loc.getServerAddress(); byte[] regionName = loc.getRegionInfo().getRegionName(); - MultiAction actions = actionsByServer.get(address); + MultiAction actions = actionsByServer.get(a); if (actions == null) { actions = new MultiAction(); - actionsByServer.put(address, actions); + actionsByServer.put(a, actions); } Action action = new Action(regionName, row, i); - lastServers[i] = address; + lastServers[i] = a; actions.add(regionName, action); } } @@ -1214,8 +1236,7 @@ public class HConnectionManager { new HashMap>( actionsByServer.size()); - for (Entry> e - : actionsByServer.entrySet()) { + for (Entry> e: actionsByServer.entrySet()) { futures.put(e.getKey(), pool.submit(createCallable(e.getKey(), e.getValue(), tableName))); } @@ -1397,7 +1418,7 @@ public class HConnectionManager { final Map regions) { for (Map.Entry e : regions.entrySet()) { cacheLocation(tableName, - new HRegionLocation(e.getKey(), e.getValue())); + new HRegionLocation(e.getKey(), e.getValue())); } } diff --git a/src/main/java/org/apache/hadoop/hbase/client/HTable.java b/src/main/java/org/apache/hadoop/hbase/client/HTable.java index a5a90db..a5eec3a 100644 --- a/src/main/java/org/apache/hadoop/hbase/client/HTable.java +++ b/src/main/java/org/apache/hadoop/hbase/client/HTable.java @@ -55,6 +55,7 @@ import 
org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitor; import org.apache.hadoop.hbase.client.coprocessor.Batch; import org.apache.hadoop.hbase.ipc.CoprocessorProtocol; import org.apache.hadoop.hbase.ipc.ExecRPCInvoker; +import org.apache.hadoop.hbase.util.Addressing; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.util.Writables; @@ -408,8 +409,8 @@ public class HTable implements HTableInterface { byte [] value = rowResult.getValue(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER); if (value != null && value.length > 0) { - String address = Bytes.toString(value); - server = new HServerAddress(address); + String hostAndPort = Bytes.toString(value); + server = new HServerAddress(Addressing.createInetSocketAddressFromHostAndPortStr(hostAndPort)); } if (!(info.isOffline() || info.isSplit())) { diff --git a/src/main/java/org/apache/hadoop/hbase/client/RetriesExhaustedWithDetailsException.java b/src/main/java/org/apache/hadoop/hbase/client/RetriesExhaustedWithDetailsException.java index 6c62024..f30bf00 100644 --- a/src/main/java/org/apache/hadoop/hbase/client/RetriesExhaustedWithDetailsException.java +++ b/src/main/java/org/apache/hadoop/hbase/client/RetriesExhaustedWithDetailsException.java @@ -50,7 +50,7 @@ public class RetriesExhaustedWithDetailsException extends RetriesExhaustedExcept List addresses) { super("Failed " + exceptions.size() + " action" + pluralize(exceptions) + ": " + - getDesc(exceptions,actions,addresses)); + getDesc(exceptions, actions, addresses)); this.exceptions = exceptions; this.actions = actions; diff --git a/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterObserver.java b/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterObserver.java index 9576c48..d30a5ad 100644 --- a/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterObserver.java +++ b/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterObserver.java @@ -22,8 +22,8 @@ 
package org.apache.hadoop.hbase.coprocessor; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.HServerInfo; import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.UnknownRegionException; import java.io.IOException; @@ -111,13 +111,13 @@ public class BaseMasterObserver implements MasterObserver { @Override public void preMove(MasterCoprocessorEnvironment env, HRegionInfo region, - HServerInfo srcServer, HServerInfo destServer) + ServerName srcServer, ServerName destServer) throws UnknownRegionException { } @Override public void postMove(MasterCoprocessorEnvironment env, HRegionInfo region, - HServerInfo srcServer, HServerInfo destServer) + ServerName srcServer, ServerName destServer) throws UnknownRegionException { } diff --git a/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java b/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java index db0870b..8e4a561 100644 --- a/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java +++ b/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java @@ -138,14 +138,14 @@ public interface MasterObserver extends Coprocessor { * Called prior to moving a given region from one region server to another. */ void preMove(MasterCoprocessorEnvironment env, final HRegionInfo region, - final HServerInfo srcServer, final HServerInfo destServer) + final ServerName srcServer, final ServerName destServer) throws UnknownRegionException; /** * Called after the region move has been requested. 
*/ void postMove(MasterCoprocessorEnvironment env, final HRegionInfo region, - final HServerInfo srcServer, final HServerInfo destServer) + final ServerName srcServer, final ServerName destServer) throws UnknownRegionException; /** diff --git a/src/main/java/org/apache/hadoop/hbase/executor/EventHandler.java b/src/main/java/org/apache/hadoop/hbase/executor/EventHandler.java index b48b390..27f3e55 100644 --- a/src/main/java/org/apache/hadoop/hbase/executor/EventHandler.java +++ b/src/main/java/org/apache/hadoop/hbase/executor/EventHandler.java @@ -221,4 +221,4 @@ public abstract class EventHandler implements Runnable, Comparable { public synchronized void setListener(EventHandlerListener listener) { this.listener = listener; } -} +} \ No newline at end of file diff --git a/src/main/java/org/apache/hadoop/hbase/executor/RegionTransitionData.java b/src/main/java/org/apache/hadoop/hbase/executor/RegionTransitionData.java index 5e3cc27..c6c0619 100644 --- a/src/main/java/org/apache/hadoop/hbase/executor/RegionTransitionData.java +++ b/src/main/java/org/apache/hadoop/hbase/executor/RegionTransitionData.java @@ -23,6 +23,7 @@ import java.io.DataInput; import java.io.DataOutput; import java.io.IOException; +import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.executor.EventHandler.EventType; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Writables; @@ -42,7 +43,7 @@ public class RegionTransitionData implements Writable { private byte [] regionName; /** Server event originated from. Optional. */ - private String serverName; + private ServerName origin; /** Time the event was created. Required but automatically set. 
*/ private long stamp; @@ -86,14 +87,15 @@ public class RegionTransitionData implements Writable { * * @param eventType type of event * @param regionName name of region as per HRegionInfo#getRegionName() - * @param serverName name of server setting data + * @param origin Name of originating server; can be a Master host+port or + * a RegionServer name formatted as per {@link ServerName} */ public RegionTransitionData(EventType eventType, byte [] regionName, - String serverName) { + final ServerName origin) { this.eventType = eventType; this.stamp = System.currentTimeMillis(); this.regionName = regionName; - this.serverName = serverName; + this.origin = origin; } /** @@ -129,8 +131,8 @@ public class RegionTransitionData implements Writable { * * @return server name of originating regionserver, or null if from master */ - public String getServerName() { - return serverName; + public ServerName getOrigin() { + return origin; } /** @@ -152,10 +154,8 @@ public class RegionTransitionData implements Writable { regionName = Bytes.readByteArray(in); // remaining fields are optional so prefixed with boolean // the name of the regionserver sending the data - if(in.readBoolean()) { - serverName = in.readUTF(); - } else { - serverName = null; + if (in.readBoolean()) { + this.origin = new ServerName(in.readUTF()); } } @@ -165,9 +165,9 @@ public class RegionTransitionData implements Writable { out.writeLong(System.currentTimeMillis()); Bytes.writeByteArray(out, regionName); // remaining fields are optional so prefixed with boolean - out.writeBoolean(serverName != null); - if(serverName != null) { - out.writeUTF(serverName); + out.writeBoolean(this.origin != null); + if(this.origin != null) { + out.writeUTF(this.origin.toString()); } } @@ -204,7 +204,7 @@ public class RegionTransitionData implements Writable { @Override public String toString() { - return "region=" + Bytes.toString(regionName) + ", server=" + serverName + + return "region=" + Bytes.toString(regionName) + ", 
origin=" + this.origin + ", state=" + eventType; } } diff --git a/src/main/java/org/apache/hadoop/hbase/io/HbaseObjectWritable.java b/src/main/java/org/apache/hadoop/hbase/io/HbaseObjectWritable.java index 7b8f193..7f3c9b0 100644 --- a/src/main/java/org/apache/hadoop/hbase/io/HbaseObjectWritable.java +++ b/src/main/java/org/apache/hadoop/hbase/io/HbaseObjectWritable.java @@ -41,7 +41,6 @@ import org.apache.hadoop.conf.Configured; import org.apache.hadoop.hbase.ClusterStatus; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.HMsg; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HServerAddress; import org.apache.hadoop.hbase.HServerInfo; @@ -146,8 +145,14 @@ public class HbaseObjectWritable implements Writable, WritableWithSize, Configur // Hbase types addToMap(HColumnDescriptor.class, code++); addToMap(HConstants.Modify.class, code++); - addToMap(HMsg.class, code++); - addToMap(HMsg[].class, code++); + + // We used to have a class named HMsg but its been removed. Rather than + // just axe it, use following random Integer class -- we just chose any + // class from java.lang -- instead just so codes that follow stay + // in same relative place. 
+ addToMap(Integer.class, code++); + addToMap(Integer[].class, code++); + addToMap(HRegion.class, code++); addToMap(HRegion[].class, code++); addToMap(HRegionInfo.class, code++); diff --git a/src/main/java/org/apache/hadoop/hbase/ipc/HMasterRegionInterface.java b/src/main/java/org/apache/hadoop/hbase/ipc/HMasterRegionInterface.java index 25139b3..0209dea 100644 --- a/src/main/java/org/apache/hadoop/hbase/ipc/HMasterRegionInterface.java +++ b/src/main/java/org/apache/hadoop/hbase/ipc/HMasterRegionInterface.java @@ -19,22 +19,14 @@ */ package org.apache.hadoop.hbase.ipc; -import org.apache.hadoop.hbase.HMsg; -import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.HServerInfo; +import java.io.IOException; + import org.apache.hadoop.io.MapWritable; import org.apache.hadoop.ipc.VersionedProtocol; -import java.io.IOException; - /** - * HRegionServers interact with the HMasterRegionInterface to report on local - * goings-on and to obtain data-handling instructions from the HMaster. - *

Changes here need to be reflected in HbaseObjectWritable HbaseRPC#Invoker. - * - *

NOTE: if you change the interface, you must change the RPC version - * number in HBaseRPCProtocolVersion - * + * The Master publishes this Interface for RegionServers to register themselves + * on. */ public interface HMasterRegionInterface extends VersionedProtocol { /** @@ -44,32 +36,18 @@ public interface HMasterRegionInterface extends VersionedProtocol { // maintained a single global version number on all HBase Interfaces. This // meant all HBase RPC was broke though only one of the three RPC Interfaces // had changed. This has since been undone. - public static final long VERSION = 28L; + public static final long VERSION = 29L; /** * Called when a region server first starts - * @param info server info + * @param port Port number this regionserver is up on. + * @param serverStartcode This servers startcode. * @param serverCurrentTime The current time of the region server in ms * @throws IOException e * @return Configuration for the regionserver to use: e.g. filesystem, * hbase rootdir, etc. */ - public MapWritable regionServerStartup(HServerInfo info, - long serverCurrentTime) throws IOException; - - /** - * Called to renew lease, tell master what the region server is doing and to - * receive new instructions from the master - * - * @param info server's address and start code - * @param msgs things the region server wants to tell the master - * @param mostLoadedRegions Array of HRegionInfos that should contain the - * reporting server's most loaded regions. These are candidates for being - * rebalanced. 
- * @return instructions from the master to the region server - * @throws IOException e - */ - public HMsg[] regionServerReport(HServerInfo info, HMsg msgs[], - HRegionInfo mostLoadedRegions[]) + public MapWritable regionServerStartup(final int port, + final long serverStartcode, final long serverCurrentTime) throws IOException; } \ No newline at end of file diff --git a/src/main/java/org/apache/hadoop/hbase/ipc/HRegionInterface.java b/src/main/java/org/apache/hadoop/hbase/ipc/HRegionInterface.java index be8abc1..6e71d30 100644 --- a/src/main/java/org/apache/hadoop/hbase/ipc/HRegionInterface.java +++ b/src/main/java/org/apache/hadoop/hbase/ipc/HRegionInterface.java @@ -290,9 +290,11 @@ public interface HRegionInterface extends VersionedProtocol, Stoppable, Abortabl /** * Method used when a master is taking the place of another failed one. - * @return The HSI + * @return This servers {@link HServerInfo}; it has RegionServer POV on the + * hostname which may not agree w/ how the Master sees this server. * @throws IOException e */ + // TODO: Deprecate. Uses getServerName instead. 
Return byte [] public HServerInfo getHServerInfo() throws IOException; /** diff --git a/src/main/java/org/apache/hadoop/hbase/master/ActiveMasterManager.java b/src/main/java/org/apache/hadoop/hbase/master/ActiveMasterManager.java index 66a3345..5125a71 100644 --- a/src/main/java/org/apache/hadoop/hbase/master/ActiveMasterManager.java +++ b/src/main/java/org/apache/hadoop/hbase/master/ActiveMasterManager.java @@ -23,8 +23,9 @@ import java.util.concurrent.atomic.AtomicBoolean; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.HServerAddress; import org.apache.hadoop.hbase.Server; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.zookeeper.ZKUtil; import org.apache.hadoop.hbase.zookeeper.ZooKeeperListener; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; @@ -48,13 +49,17 @@ class ActiveMasterManager extends ZooKeeperListener { final AtomicBoolean clusterHasActiveMaster = new AtomicBoolean(false); - private final HServerAddress address; + private final ServerName sn; private final Server master; - ActiveMasterManager(ZooKeeperWatcher watcher, HServerAddress address, - Server master) { + /** + * @param watcher + * @param sn ServerName + * @param master In an instance of a Master. 
+ */ + ActiveMasterManager(ZooKeeperWatcher watcher, ServerName sn, Server master) { super(watcher); - this.address = address; + this.sn = sn; this.master = master; } @@ -122,11 +127,11 @@ class ActiveMasterManager extends ZooKeeperListener { boolean cleanSetOfActiveMaster = true; // Try to become the active master, watch if there is another master try { - if (ZKUtil.setAddressAndWatch(this.watcher, - this.watcher.masterAddressZNode, this.address)) { + if (ZKUtil.createEphemeralNodeAndWatch(this.watcher, + this.watcher.masterAddressZNode, Bytes.toBytes(this.sn.toString()))) { // We are the master, return this.clusterHasActiveMaster.set(true); - LOG.info("Master=" + this.address); + LOG.info("Master=" + this.sn); return cleanSetOfActiveMaster; } cleanSetOfActiveMaster = false; @@ -134,9 +139,10 @@ class ActiveMasterManager extends ZooKeeperListener { // There is another active master running elsewhere or this is a restart // and the master ephemeral node has not expired yet. this.clusterHasActiveMaster.set(true); - HServerAddress currentMaster = - ZKUtil.getDataAsAddress(this.watcher, this.watcher.masterAddressZNode); - if (currentMaster != null && currentMaster.equals(this.address)) { + byte [] bytes = + ZKUtil.getDataAndWatch(this.watcher, this.watcher.masterAddressZNode); + ServerName currentMaster = new ServerName(Bytes.toString(bytes)); + if (currentMaster != null && currentMaster.equals(this.sn)) { LOG.info("Current master has this master's address, " + currentMaster + "; master was restarted? Waiting on znode to expire..."); // Hurry along the expiration of the znode. 
@@ -177,11 +183,11 @@ class ActiveMasterManager extends ZooKeeperListener { public void stop() { try { // If our address is in ZK, delete it on our way out - HServerAddress zkAddress = - ZKUtil.getDataAsAddress(watcher, watcher.masterAddressZNode); + byte [] bytes = + ZKUtil.getDataAndWatch(watcher, watcher.masterAddressZNode); // TODO: redo this to make it atomic (only added for tests) - if(zkAddress != null && - zkAddress.equals(address)) { + ServerName master = new ServerName(Bytes.toString(bytes)); + if(master != null && master.equals(this.sn)) { ZKUtil.deleteNode(watcher, watcher.masterAddressZNode); } } catch (KeeperException e) { diff --git a/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java b/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java index f14a0ed..6fe084d 100644 --- a/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java +++ b/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java @@ -43,19 +43,18 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Chore; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.HServerAddress; -import org.apache.hadoop.hbase.HServerInfo; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.NotServingRegionException; import org.apache.hadoop.hbase.Server; +import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.Stoppable; import org.apache.hadoop.hbase.catalog.CatalogTracker; import org.apache.hadoop.hbase.catalog.MetaReader; import org.apache.hadoop.hbase.catalog.RootLocationEditor; import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.executor.EventHandler.EventType; import org.apache.hadoop.hbase.executor.ExecutorService; import org.apache.hadoop.hbase.executor.RegionTransitionData; -import org.apache.hadoop.hbase.executor.EventHandler.EventType; import 
org.apache.hadoop.hbase.master.LoadBalancer.RegionPlan; import org.apache.hadoop.hbase.master.handler.ClosedRegionHandler; import org.apache.hadoop.hbase.master.handler.OpenedRegionHandler; @@ -66,9 +65,9 @@ import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.hbase.zookeeper.ZKAssign; import org.apache.hadoop.hbase.zookeeper.ZKTable; import org.apache.hadoop.hbase.zookeeper.ZKUtil; +import org.apache.hadoop.hbase.zookeeper.ZKUtil.NodeAndData; import org.apache.hadoop.hbase.zookeeper.ZooKeeperListener; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; -import org.apache.hadoop.hbase.zookeeper.ZKUtil.NodeAndData; import org.apache.hadoop.io.Writable; import org.apache.hadoop.ipc.RemoteException; import org.apache.zookeeper.AsyncCallback; @@ -122,8 +121,8 @@ public class AssignmentManager extends ZooKeeperListener { * with the other under a lock on {@link #regions} * @see #regions */ - private final NavigableMap> servers = - new TreeMap>(); + private final NavigableMap> servers = + new TreeMap>(); /** * Region to server assignment map. @@ -132,8 +131,8 @@ public class AssignmentManager extends ZooKeeperListener { * with the other under a lock on {@link #regions} * @see #servers */ - private final SortedMap regions = - new TreeMap(); + private final SortedMap regions = + new TreeMap(); private final ExecutorService executorService; @@ -167,6 +166,26 @@ public class AssignmentManager extends ZooKeeperListener { } /** + * Compute the average load across all region servers. + * Currently, this uses a very naive computation - just uses the number of + * regions being served, ignoring stats about number of requests. + * @return the average load + */ + double getAverageLoad() { + int totalLoad = 0; + int numServers = 0; + // Sync on this.regions because access to this.servers always synchronizes + // in this order. 
+ synchronized (this.regions) { + for (Map.Entry> e: servers.entrySet()) { + numServers++; + totalLoad += e.getValue().size(); + } + } + return (double)totalLoad / (double)numServers; + } + + /** * @return Instance of ZKTable. */ public ZKTable getZKTable() { @@ -189,38 +208,51 @@ public class AssignmentManager extends ZooKeeperListener { } /** - * Handle failover. Restore state from META and ZK. Handle any regions in - * transition. Presumes .META. and -ROOT- deployed. - * @throws KeeperException + * Called on startup. + * Figures whether a fresh cluster start of we are joining extant running cluster. * @throws IOException + * @throws KeeperException + * @throws InterruptedException */ - void processFailover() throws KeeperException, IOException { + void joinCluster() throws IOException, KeeperException, InterruptedException { // Concurrency note: In the below the accesses on regionsInTransition are // outside of a synchronization block where usually all accesses to RIT are // synchronized. The presumption is that in this case it is safe since this // method is being played by a single thread on startup. - // TODO: Check list of user regions and their assignments against regionservers. // TODO: Regions that have a null location and are not in regionsInTransitions // need to be handled. - // Scan META to build list of existing regions, servers, and assignment - // Returns servers who have not checked in (assumed dead) and their regions - Map>> deadServers = + Map>> deadServers = rebuildUserRegions(); - // Process list of dead servers - processDeadServers(deadServers); // Check existing regions in transition List nodes = ZKUtil.listChildrenAndWatchForNewChildren(watcher, - watcher.assignmentZNode); - if (nodes.isEmpty()) { - LOG.info("No regions in transition in ZK to process on failover"); - return; + watcher.assignmentZNode); + // Run through all regions. If they are not assigned and not in RIT, then + // its a clean cluster startup, else its a failover. 
+ boolean userRegionsOutOnCluster = false; + for (Map.Entry e: this.regions.entrySet()) { + if (e.getValue() != null) { + userRegionsOutOnCluster = true; + break; + } + if (nodes.contains(e.getKey().getEncodedName())) { + userRegionsOutOnCluster = true; + break; + } } - LOG.info("Failed-over master needs to process " + nodes.size() + - " regions in transition"); - for (String encodedRegionName: nodes) { - processRegionInTransition(encodedRegionName, null); + if (userRegionsOutOnCluster) { + LOG.info("Found regions out on cluster or in RIT; failover"); + processDeadServers(deadServers); + if (!nodes.isEmpty()) { + for (String encodedRegionName: nodes) { + processRegionInTransition(encodedRegionName, null); + } + } + } else { + // Fresh cluster startup. + cleanoutUnassigned(); + assignAllUserRegions(); } } @@ -249,10 +281,10 @@ public class AssignmentManager extends ZooKeeperListener { } /** - * Process failover of encodedName. Look in + * Process failover of servername. Look in RIT. * @param encodedRegionName Region to process failover for. - * @param encodedRegionName RegionInfo. If null we'll go get it from meta table. - * @return + * @param regionInfo If null we'll go get it from meta table. + * @return True if we processed regionInfo as a RIT. * @throws KeeperException * @throws IOException */ @@ -263,7 +295,7 @@ public class AssignmentManager extends ZooKeeperListener { if (data == null) return false; HRegionInfo hri = regionInfo; if (hri == null) { - Pair p = + Pair p = MetaReader.getRegion(catalogTracker, data.getRegionName()); if (p == null) return false; hri = p.getFirst(); @@ -312,17 +344,18 @@ public class AssignmentManager extends ZooKeeperListener { // Region is opened, insert into RIT and handle it regionsInTransition.put(encodedRegionName, new RegionState( regionInfo, RegionState.State.OPENING, data.getStamp())); - HServerInfo hsi = serverManager.getServerInfo(data.getServerName()); + ServerName sn = + data.getOrigin() == null? 
null: data.getOrigin(); // hsi could be null if this server is no longer online. If // that the case, just let this RIT timeout; it'll be assigned // to new server then. - if (hsi == null) { + if (sn == null) { LOG.warn("Region in transition " + regionInfo.getEncodedName() + - " references a server no longer up " + data.getServerName() + - "; letting RIT timeout so will be assigned elsewhere"); + " references a null server; letting RIT timeout so will be " + + "assigned elsewhere"); break; } - new OpenedRegionHandler(master, this, regionInfo, hsi).process(); + new OpenedRegionHandler(master, this, regionInfo, sn).process(); break; } } @@ -339,18 +372,19 @@ public class AssignmentManager extends ZooKeeperListener { */ private void handleRegion(final RegionTransitionData data) { synchronized(regionsInTransition) { - if (data == null || data.getServerName() == null) { + if (data == null || data.getOrigin() == null) { LOG.warn("Unexpected NULL input " + data); return; } // Check if this is a special HBCK transition - if (data.getServerName().equals(HConstants.HBCK_CODE_NAME)) { + if (data.getOrigin().equals(HConstants.HBCK_CODE_SERVERNAME)) { handleHBCK(data); return; } + ServerName sn = data.getOrigin(); // Verify this is a known server - if (!serverManager.isServerOnline(data.getServerName()) && - !this.master.getServerName().equals(data.getServerName())) { + if (!serverManager.isServerOnline(sn) && + !this.master.getServerName().equals(data.getOrigin())) { LOG.warn("Attempted to handle region transition for server but " + "server is not online: " + data.getRegionName()); return; @@ -358,7 +392,7 @@ public class AssignmentManager extends ZooKeeperListener { String encodedName = HRegionInfo.encodeRegionName(data.getRegionName()); String prettyPrintedRegionName = HRegionInfo.prettyPrint(encodedName); LOG.debug("Handling transition=" + data.getEventType() + - ", server=" + data.getServerName() + ", region=" + prettyPrintedRegionName); + ", server=" + data.getOrigin() + 
", region=" + prettyPrintedRegionName); RegionState regionState = regionsInTransition.get(encodedName); switch (data.getEventType()) { case M_ZK_REGION_OFFLINE: @@ -371,7 +405,7 @@ public class AssignmentManager extends ZooKeeperListener { if (regionState == null || (!regionState.isPendingClose() && !regionState.isClosing())) { LOG.warn("Received CLOSING for region " + prettyPrintedRegionName + - " from server " + data.getServerName() + " but region was in " + + " from server " + data.getOrigin() + " but region was in " + " the state " + regionState + " and not " + "in expected PENDING_CLOSE or CLOSING states"); return; @@ -385,7 +419,7 @@ public class AssignmentManager extends ZooKeeperListener { if (regionState == null || (!regionState.isPendingClose() && !regionState.isClosing())) { LOG.warn("Received CLOSED for region " + prettyPrintedRegionName + - " from server " + data.getServerName() + " but region was in " + + " from server " + data.getOrigin() + " but region was in " + " the state " + regionState + " and not " + "in expected PENDING_CLOSE or CLOSING states"); return; @@ -405,7 +439,7 @@ public class AssignmentManager extends ZooKeeperListener { (!regionState.isPendingOpen() && !regionState.isOpening())) { LOG.warn("Received OPENING for region " + prettyPrintedRegionName + - " from server " + data.getServerName() + " but region was in " + + " from server " + data.getOrigin() + " but region was in " + " the state " + regionState + " and not " + "in expected PENDING_OPEN or OPENING states"); return; @@ -420,7 +454,7 @@ public class AssignmentManager extends ZooKeeperListener { (!regionState.isPendingOpen() && !regionState.isOpening())) { LOG.warn("Received OPENED for region " + prettyPrintedRegionName + - " from server " + data.getServerName() + " but region was in " + + " from server " + data.getOrigin() + " but region was in " + " the state " + regionState + " and not " + "in expected PENDING_OPEN or OPENING states"); return; @@ -429,7 +463,7 @@ public 
class AssignmentManager extends ZooKeeperListener { regionState.update(RegionState.State.OPEN, data.getStamp()); this.executorService.submit( new OpenedRegionHandler(master, this, regionState.getRegion(), - this.serverManager.getServerInfo(data.getServerName()))); + data.getOrigin())); break; } } @@ -444,7 +478,7 @@ public class AssignmentManager extends ZooKeeperListener { private void handleHBCK(RegionTransitionData data) { String encodedName = HRegionInfo.encodeRegionName(data.getRegionName()); LOG.info("Handling HBCK triggered transition=" + data.getEventType() + - ", server=" + data.getServerName() + ", region=" + + ", server=" + data.getOrigin() + ", region=" + HRegionInfo.prettyPrint(encodedName)); RegionState regionState = regionsInTransition.get(encodedName); switch (data.getEventType()) { @@ -571,9 +605,9 @@ public class AssignmentManager extends ZooKeeperListener { *

* Used when a region has been successfully opened on a region server. * @param regionInfo - * @param serverInfo + * @param sn */ - public void regionOnline(HRegionInfo regionInfo, HServerInfo serverInfo) { + public void regionOnline(HRegionInfo regionInfo, ServerName sn) { synchronized (this.regionsInTransition) { RegionState rs = this.regionsInTransition.remove(regionInfo.getEncodedName()); @@ -583,22 +617,22 @@ public class AssignmentManager extends ZooKeeperListener { } synchronized (this.regions) { // Add check - HServerInfo hsi = this.regions.get(regionInfo); - if (hsi != null) LOG.warn("Overwriting " + regionInfo.getEncodedName() + - " on " + hsi); - this.regions.put(regionInfo, serverInfo); - addToServers(serverInfo, regionInfo); + ServerName oldSn = this.regions.get(regionInfo); + if (oldSn != null) LOG.warn("Overwriting " + regionInfo.getEncodedName() + + " on " + oldSn + " with " + sn); + this.regions.put(regionInfo, sn); + addToServers(sn, regionInfo); this.regions.notifyAll(); } // Remove plan if one. clearRegionPlan(regionInfo); // Update timers for all regions in transition going against this server. - updateTimers(serverInfo); + updateTimers(sn); } /** * Touch timers for all regions in transition that have the passed - * hsi in common. + * sn in common. * Call this method whenever a server checks in. Doing so helps the case where * a new regionserver has joined the cluster and its been given 1k regions to * open. If this method is tickled every time the region reports in a @@ -607,9 +641,9 @@ public class AssignmentManager extends ZooKeeperListener { * as part of bulk assign -- there we have a different mechanism for extending * the regions in transition timer (we turn it off temporarily -- because * there is no regionplan involved when bulk assigning. - * @param hsi + * @param sn */ - private void updateTimers(final HServerInfo hsi) { + private void updateTimers(final ServerName sn) { // This loop could be expensive. 
// First make a copy of current regionPlan rather than hold sync while // looping because holding sync can cause deadlock. Its ok in this loop @@ -619,7 +653,7 @@ public class AssignmentManager extends ZooKeeperListener { copy.putAll(this.regionPlans); } for (Map.Entry e: copy.entrySet()) { - if (!e.getValue().getDestination().equals(hsi)) continue; + if (!e.getValue().getDestination().equals(sn)) continue; RegionState rs = null; synchronized (this.regionsInTransition) { rs = this.regionsInTransition.get(e.getKey()); @@ -658,11 +692,11 @@ public class AssignmentManager extends ZooKeeperListener { */ public void setOffline(HRegionInfo regionInfo) { synchronized (this.regions) { - HServerInfo serverInfo = this.regions.remove(regionInfo); - if (serverInfo == null) return; - List serverRegions = this.servers.get(serverInfo); + ServerName sn = this.regions.remove(regionInfo); + if (sn == null) return; + List serverRegions = this.servers.get(sn); if (!serverRegions.remove(regionInfo)) { - LOG.warn("No " + regionInfo + " on " + serverInfo); + LOG.warn("No " + regionInfo + " on " + sn); } } } @@ -737,10 +771,10 @@ public class AssignmentManager extends ZooKeeperListener { * @param destination * @param regions Regions to assign. 
*/ - void assign(final HServerInfo destination, + void assign(final ServerName destination, final List regions) { LOG.debug("Bulk assigning " + regions.size() + " region(s) to " + - destination.getServerName()); + destination.toString()); List states = new ArrayList(regions.size()); synchronized (this.regionsInTransition) { @@ -763,7 +797,7 @@ public class AssignmentManager extends ZooKeeperListener { for (int oldCounter = 0; true;) { int count = counter.get(); if (oldCounter != count) { - LOG.info(destination.getServerName() + " unassigned znodes=" + count + + LOG.info(destination.toString() + " unassigned znodes=" + count + " of total=" + total); oldCounter = count; } @@ -778,7 +812,7 @@ public class AssignmentManager extends ZooKeeperListener { this.master.abort("Failed assignment of regions to " + destination, t); return; } - LOG.debug("Bulk assigning done for " + destination.getServerName()); + LOG.debug("Bulk assigning done for " + destination.toString()); } /** @@ -787,11 +821,11 @@ public class AssignmentManager extends ZooKeeperListener { static class CreateUnassignedAsyncCallback implements AsyncCallback.StringCallback { private final Log LOG = LogFactory.getLog(CreateUnassignedAsyncCallback.class); private final ZooKeeperWatcher zkw; - private final HServerInfo destination; + private final ServerName destination; private final AtomicInteger counter; CreateUnassignedAsyncCallback(final ZooKeeperWatcher zkw, - final HServerInfo destination, final AtomicInteger counter) { + final ServerName destination, final AtomicInteger counter) { this.zkw = zkw; this.destination = destination; this.counter = counter; @@ -807,7 +841,7 @@ public class AssignmentManager extends ZooKeeperListener { ", rc=" + rc, null); return; } - LOG.debug("rs=" + (RegionState)ctx + ", server=" + this.destination.getServerName()); + LOG.debug("rs=" + (RegionState)ctx + ", server=" + this.destination.toString()); // Async exists to set a watcher so we'll get triggered when // unassigned 
node changes. this.zkw.getZooKeeper().exists(path, this.zkw, @@ -894,7 +928,7 @@ public class AssignmentManager extends ZooKeeperListener { if (plan == null) return; // Should get reassigned later when RIT times out. try { LOG.debug("Assigning region " + state.getRegion().getRegionNameAsString() + - " to " + plan.getDestination().getServerName()); + " to " + plan.getDestination().toString()); // Transition RegionState to PENDING_OPEN state.update(RegionState.State.PENDING_OPEN); // Send OPEN RPC. This can fail if the server on other end is is not up. @@ -935,7 +969,7 @@ public class AssignmentManager extends ZooKeeperListener { state.update(RegionState.State.OFFLINE); try { if(!ZKAssign.createOrForceNodeOffline(master.getZooKeeper(), - state.getRegion(), master.getServerName())) { + state.getRegion(), this.master.getServerName())) { LOG.warn("Attempted to create/force node into OFFLINE state before " + "completing assignment but failed to do so for " + state); return false; @@ -964,7 +998,7 @@ public class AssignmentManager extends ZooKeeperListener { state.update(RegionState.State.OFFLINE); try { ZKAssign.asyncCreateNodeOffline(master.getZooKeeper(), state.getRegion(), - master.getServerName(), cb, ctx); + this.master.getServerName(), cb, ctx); } catch (KeeperException e) { master.abort("Unexpected ZK exception creating/setting node OFFLINE", e); return false; @@ -992,10 +1026,10 @@ public class AssignmentManager extends ZooKeeperListener { * if no servers to assign, it returns null). 
*/ RegionPlan getRegionPlan(final RegionState state, - final HServerInfo serverToExclude, final boolean forceNewPlan) { + final ServerName serverToExclude, final boolean forceNewPlan) { // Pickup existing plan or make a new one String encodedName = state.getRegion().getEncodedName(); - List servers = this.serverManager.getOnlineServersList(); + List servers = this.serverManager.getOnlineServersList(); // The remove below hinges on the fact that the call to // serverManager.getOnlineServersList() returns a copy if (serverToExclude != null) servers.remove(serverToExclude); @@ -1083,7 +1117,7 @@ public class AssignmentManager extends ZooKeeperListener { } } // Send CLOSE RPC - HServerInfo server = null; + ServerName server = null; synchronized (this.regions) { server = regions.get(region); } @@ -1191,10 +1225,11 @@ public class AssignmentManager extends ZooKeeperListener { * @throws InterruptedException * @throws IOException */ - public void assignUserRegions(List regions, List servers) throws IOException, InterruptedException { + public void assignUserRegions(List regions, List servers) + throws IOException, InterruptedException { if (regions == null) return; - Map> bulkPlan = null; + Map> bulkPlan = null; // Generate a round-robin bulk assignment plan bulkPlan = LoadBalancer.roundRobinAssignment(regions, servers); LOG.info("Bulk assigning " + regions.size() + " region(s) round-robin across " + @@ -1216,10 +1251,10 @@ public class AssignmentManager extends ZooKeeperListener { */ public void assignAllUserRegions() throws IOException, InterruptedException { // Get all available servers - List servers = serverManager.getOnlineServersList(); + List servers = serverManager.getOnlineServersList(); // Scan META for all user regions, skipping any disabled tables - Map allRegions = + Map allRegions = MetaReader.fullScan(catalogTracker, this.zkTable.getDisabledTables(), true); if (allRegions == null || allRegions.isEmpty()) return; @@ -1227,7 +1262,7 @@ public class 
AssignmentManager extends ZooKeeperListener { boolean retainAssignment = master.getConfiguration(). getBoolean("hbase.master.startup.retainassign", true); - Map> bulkPlan = null; + Map> bulkPlan = null; if (retainAssignment) { // Reuse existing assignment info bulkPlan = LoadBalancer.retainAssignment(allRegions, servers); @@ -1249,11 +1284,11 @@ public class AssignmentManager extends ZooKeeperListener { * Run bulk assign on startup. */ static class BulkStartupAssigner extends BulkAssigner { - private final Map> bulkPlan; + private final Map> bulkPlan; private final AssignmentManager assignmentManager; BulkStartupAssigner(final Server server, - final Map> bulkPlan, + final Map> bulkPlan, final AssignmentManager am) { super(server); this.bulkPlan = bulkPlan; @@ -1279,7 +1314,7 @@ public class AssignmentManager extends ZooKeeperListener { @Override protected void populatePool(java.util.concurrent.ExecutorService pool) { - for (Map.Entry> e: this.bulkPlan.entrySet()) { + for (Map.Entry> e: this.bulkPlan.entrySet()) { pool.execute(new SingleServerBulkAssigner(e.getKey(), e.getValue(), this.assignmentManager)); } @@ -1295,11 +1330,11 @@ public class AssignmentManager extends ZooKeeperListener { * Manage bulk assigning to a server. 
*/ static class SingleServerBulkAssigner implements Runnable { - private final HServerInfo regionserver; + private final ServerName regionserver; private final List regions; private final AssignmentManager assignmentManager; - SingleServerBulkAssigner(final HServerInfo regionserver, + SingleServerBulkAssigner(final ServerName regionserver, final List regions, final AssignmentManager am) { this.regionserver = regionserver; this.regions = regions; @@ -1346,28 +1381,26 @@ public class AssignmentManager extends ZooKeeperListener { * in META * @throws IOException */ - private Map>> rebuildUserRegions() + Map>> rebuildUserRegions() throws IOException { // Region assignment from META - List results = MetaReader.fullScanOfResults(catalogTracker); + List results = MetaReader.fullScanOfResults(this.catalogTracker); // Map of offline servers and their regions to be returned - Map>> offlineServers = - new TreeMap>>(); + Map>> offlineServers = + new TreeMap>>(); // Iterate regions in META for (Result result : results) { - Pair region = - MetaReader.metaRowToRegionPairWithInfo(result); + Pair region = MetaReader.metaRowToRegionPair(result); if (region == null) continue; - HServerInfo regionLocation = region.getSecond(); HRegionInfo regionInfo = region.getFirst(); + ServerName regionLocation = region.getSecond(); if (regionLocation == null) { // Region not being served, add to region map with no assignment // If this needs to be assigned out, it will also be in ZK as RIT this.regions.put(regionInfo, null); - } else if (!serverManager.isServerOnline( - regionLocation.getServerName())) { + } else if (!this.serverManager.isServerOnline(regionLocation)) { // Region is located on a server that isn't online - List> offlineRegions = + List> offlineRegions = offlineServers.get(regionLocation); if (offlineRegions == null) { offlineRegions = new ArrayList>(1); @@ -1376,7 +1409,7 @@ public class AssignmentManager extends ZooKeeperListener { offlineRegions.add(new Pair(regionInfo, result)); 
} else { // Region is being served and on an active server - regions.put(regionInfo, regionLocation); + this.regions.put(regionInfo, regionLocation); addToServers(regionLocation, regionInfo); } } @@ -1397,9 +1430,9 @@ public class AssignmentManager extends ZooKeeperListener { * @throws KeeperException */ private void processDeadServers( - Map>> deadServers) + Map>> deadServers) throws IOException, KeeperException { - for (Map.Entry>> deadServer : + for (Map.Entry>> deadServer: deadServers.entrySet()) { List> regions = deadServer.getValue(); for (Pair region : regions) { @@ -1408,7 +1441,7 @@ public class AssignmentManager extends ZooKeeperListener { // If region was in transition (was in zk) force it offline for reassign try { ZKAssign.createOrForceNodeOffline(watcher, regionInfo, - master.getServerName()); + this.master.getServerName()); } catch (KeeperException.NoNodeException nne) { // This is fine } @@ -1424,11 +1457,11 @@ public class AssignmentManager extends ZooKeeperListener { * @param hsi * @param hri */ - private void addToServers(final HServerInfo hsi, final HRegionInfo hri) { - List hris = servers.get(hsi); + private void addToServers(final ServerName sn, final HRegionInfo hri) { + List hris = servers.get(sn); if (hris == null) { hris = new ArrayList(); - servers.put(hsi, hris); + servers.put(sn, hris); } hris.add(hri); } @@ -1636,7 +1669,7 @@ public class AssignmentManager extends ZooKeeperListener { try { data = new RegionTransitionData( EventType.M_ZK_REGION_OFFLINE, regionInfo.getRegionName(), - master.getServerName()); + master.getServerName()); if (ZKUtil.setData(watcher, node, data.getBytes(), stat.getVersion())) { // Node is now OFFLINE, let's trigger another assignment @@ -1693,16 +1726,16 @@ public class AssignmentManager extends ZooKeeperListener { /** * Process shutdown server removing any assignments. - * @param hsi Server that went down. + * @param sn Server that went down. 
* @return list of regions in transition on this server */ - public List processServerShutdown(final HServerInfo hsi) { + public List processServerShutdown(final ServerName sn) { // Clean out any existing assignment plans for this server synchronized (this.regionPlans) { for (Iterator > i = this.regionPlans.entrySet().iterator(); i.hasNext();) { Map.Entry e = i.next(); - if (e.getValue().getDestination().equals(hsi)) { + if (e.getValue().getDestination().equals(sn)) { // Use iterator's remove else we'll get CME i.remove(); } @@ -1714,7 +1747,7 @@ public class AssignmentManager extends ZooKeeperListener { Set deadRegions = null; List rits = new ArrayList(); synchronized (this.regions) { - List assignedRegions = this.servers.remove(hsi); + List assignedRegions = this.servers.remove(sn); if (assignedRegions == null || assignedRegions.isEmpty()) { // No regions on this server, we are done, return empty list of RITs return rits; @@ -1739,12 +1772,12 @@ public class AssignmentManager extends ZooKeeperListener { /** * Update inmemory structures. 
- * @param hsi Server that reported the split + * @param sn Server that reported the split * @param parent Parent region that was split * @param a Daughter region A * @param b Daughter region B */ - public void handleSplitReport(final HServerInfo hsi, final HRegionInfo parent, + public void handleSplitReport(final ServerName sn, final HRegionInfo parent, final HRegionInfo a, final HRegionInfo b) { regionOffline(parent); // Remove any CLOSING node, if exists, due to race between master & rs @@ -1765,8 +1798,8 @@ public class AssignmentManager extends ZooKeeperListener { LOG.warn("Exception while validating RIT during split report", e); } - regionOnline(a, hsi); - regionOnline(b, hsi); + regionOnline(a, sn); + regionOnline(b, sn); // There's a possibility that the region was splitting while a user asked // the master to disable, we need to make sure we close those regions in @@ -1784,21 +1817,16 @@ public class AssignmentManager extends ZooKeeperListener { * If a new server has come in and it has no regions, it will not be included * in the returned Map. */ - Map> getAssignments() { + Map> getAssignments() { // This is an EXPENSIVE clone. Cloning though is the safest thing to do. // Can't let out original since it can change and at least the loadbalancer // wants to iterate this exported list. We need to synchronize on regions // since all access to this.servers is under a lock on this.regions. - Map> result = null; + Map> result = null; synchronized (this.regions) { - result = new HashMap>(this.servers.size()); - for (Map.Entry> e: this.servers.entrySet()) { - List shallowCopy = new ArrayList(e.getValue()); - HServerInfo clone = new HServerInfo(e.getKey()); - // Set into server load the number of regions this server is carrying - // The load balancer calculation needs it at least and its handy. 
- clone.getLoad().setNumberOfRegions(e.getValue().size()); - result.put(clone, shallowCopy); + result = new HashMap>(this.servers.size()); + for (Map.Entry> e: this.servers.entrySet()) { + result.put(e.getKey(), new ArrayList(e.getValue())); } } return result; @@ -1809,12 +1837,12 @@ public class AssignmentManager extends ZooKeeperListener { * @return Null or a {@link Pair} instance that holds the full {@link HRegionInfo} * and the hosting servers {@link HServerInfo}. */ - Pair getAssignment(final byte [] encodedRegionName) { + Pair getAssignment(final byte [] encodedRegionName) { String name = Bytes.toString(encodedRegionName); synchronized(this.regions) { - for (Map.Entry e: this.regions.entrySet()) { + for (Map.Entry e: this.regions.entrySet()) { if (e.getKey().getEncodedName().equals(name)) { - return new Pair(e.getKey(), e.getValue()); + return new Pair(e.getKey(), e.getValue()); } } } diff --git a/src/main/java/org/apache/hadoop/hbase/master/DeadServer.java b/src/main/java/org/apache/hadoop/hbase/master/DeadServer.java index efcbb99..38f78ad 100644 --- a/src/main/java/org/apache/hadoop/hbase/master/DeadServer.java +++ b/src/main/java/org/apache/hadoop/hbase/master/DeadServer.java @@ -27,7 +27,7 @@ import java.util.List; import java.util.Set; import org.apache.commons.lang.NotImplementedException; -import org.apache.hadoop.hbase.HServerInfo; +import org.apache.hadoop.hbase.ServerName; /** * Class to hold dead servers list and utility querying dead server list. 
@@ -58,7 +58,7 @@ public class DeadServer implements Set { } /** - * @param serverName + * @param serverName Server name * @return true if server is dead */ public boolean isDeadServer(final String serverName) { @@ -74,7 +74,7 @@ public class DeadServer implements Set { * @return true if server is dead */ boolean isDeadServer(final String serverName, final boolean hostAndPortOnly) { - return HServerInfo.isServer(this, serverName, hostAndPortOnly); + return ServerName.isServer(this, serverName, hostAndPortOnly); } /** diff --git a/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index 3f3b696..62213b5 100644 --- a/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -23,7 +23,6 @@ import java.io.IOException; import java.lang.reflect.Constructor; import java.lang.reflect.InvocationTargetException; import java.net.InetSocketAddress; -import java.net.UnknownHostException; import java.util.ArrayList; import java.util.Arrays; import java.util.List; @@ -37,14 +36,12 @@ import org.apache.hadoop.hbase.Chore; import org.apache.hadoop.hbase.ClusterStatus; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.HMsg; import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.HServerAddress; -import org.apache.hadoop.hbase.HServerInfo; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.MasterNotRunningException; import org.apache.hadoop.hbase.NotAllMetaRegionsOnlineException; import org.apache.hadoop.hbase.Server; +import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableExistsException; import org.apache.hadoop.hbase.TableNotDisabledException; import org.apache.hadoop.hbase.TableNotFoundException; @@ -55,8 +52,8 @@ import org.apache.hadoop.hbase.catalog.MetaReader; import 
org.apache.hadoop.hbase.client.HConnection; import org.apache.hadoop.hbase.client.HConnectionManager; import org.apache.hadoop.hbase.client.MetaScanner; -import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitor; +import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.executor.ExecutorService; import org.apache.hadoop.hbase.executor.ExecutorService.ExecutorType; import org.apache.hadoop.hbase.ipc.HBaseRPC; @@ -130,8 +127,6 @@ implements HMasterInterface, HMasterRegionInterface, MasterServices, Server { // RPC server for the HMaster private final RpcServer rpcServer; - // Address of the HMaster - private final HServerAddress address; // Metrics for the HMaster private final MasterMetrics metrics; // file system manager for the master FS operations @@ -171,6 +166,7 @@ implements HMasterInterface, HMasterRegionInterface, MasterServices, Server { private LogCleaner logCleaner; private MasterCoprocessorHost cpHost; + private final ServerName serverName; /** * Initializes the HMaster. The steps are as follows: @@ -188,39 +184,46 @@ implements HMasterInterface, HMasterRegionInterface, MasterServices, Server { throws IOException, KeeperException, InterruptedException { this.conf = conf; - /* - * Determine address and initialize RPC server (but do not start). - * The RPC server ports can be ephemeral. Create a ZKW instance. - */ - HServerAddress a = new HServerAddress(getMyAddress(this.conf)); - int numHandlers = conf.getInt("hbase.regionserver.handler.count", 10); + // Server to handle client requests. + String hostname = DNS.getDefaultHost( + conf.get("hbase.master.dns.interface", "default"), + conf.get("hbase.master.dns.nameserver", "default")); + int port = conf.getInt(HConstants.MASTER_PORT, HConstants.DEFAULT_MASTER_PORT); + // Creation of a HSA will force a resolve. 
+ InetSocketAddress isa = new InetSocketAddress(hostname, port); + if (isa.getAddress() == null) { + throw new IllegalArgumentException("Failed resolve of " + isa); + } + int numHandlers = conf.getInt("hbase.master.handler.count", + conf.getInt("hbase.regionserver.handler.count", 25)); this.rpcServer = HBaseRPC.getServer(this, new Class[]{HMasterInterface.class, HMasterRegionInterface.class}, - a.getBindAddress(), a.getPort(), - numHandlers, - 0, // we dont use high priority handlers in master - false, conf, - 0); // this is a DNC w/o high priority handlers - this.address = new HServerAddress(rpcServer.getListenerAddress()); - + isa.getHostName(), // BindAddress is IP we got for this server. + isa.getPort(), + numHandlers, + 0, // we dont use high priority handlers in master + false, conf, + 0); // this is a DNC w/o high priority handlers + // Update isa with what is in rpc; it may have changed the port; e.g. if + // we were asked bind to port 0. + isa = this.rpcServer.getListenerAddress(); // set the thread name now we have an address - setName(MASTER + "-" + this.address); + setName(MASTER + "-" + isa.toString()); Replication.decorateMasterConfiguration(this.conf); - this.rpcServer.startThreads(); + this.serverName = new ServerName(isa.getHostName(), isa.getPort(), + System.currentTimeMillis()); // Hack! Maps DFSClient => Master for logs. HDFS made this // config param for task trackers, but we can piggyback off of it. 
if (this.conf.get("mapred.task.id") == null) { - this.conf.set("mapred.task.id", "hb_m_" + this.address.toString() + + this.conf.set("mapred.task.id", "hb_m_" + this.serverName.toString() + "_" + System.currentTimeMillis()); } - this.zooKeeper = new ZooKeeperWatcher(conf, MASTER + ":" + - address.getPort(), this); - - this.metrics = new MasterMetrics(getServerName()); + this.zooKeeper = new ZooKeeperWatcher(conf, MASTER + ":" + isa.getPort(), this); + this.metrics = new MasterMetrics(getServerName().toString()); } /** @@ -271,7 +274,8 @@ implements HMasterInterface, HMasterRegionInterface, MasterServices, Server { * now wait until it dies to try and become the next active master. If we * do not succeed on our first attempt, this is no longer a cluster startup. */ - this.activeMasterManager = new ActiveMasterManager(zooKeeper, address, this); + this.activeMasterManager = + new ActiveMasterManager(zooKeeper, this.serverName, this); this.zooKeeper.registerListener(activeMasterManager); stallIfBackupMaster(this.conf, this.activeMasterManager); this.activeMasterManager.blockUntilBecomingActiveMaster(); @@ -343,9 +347,9 @@ implements HMasterInterface, HMasterRegionInterface, MasterServices, Server { // TODO: Do this using Dependency Injection, using PicoContainer, Guice or Spring. 
this.fileSystemManager = new MasterFileSystem(this, metrics); this.connection = HConnectionManager.getConnection(conf); - this.executorService = new ExecutorService(getServerName()); + this.executorService = new ExecutorService(getServerName().toString()); - this.serverManager = new ServerManager(this, this, metrics); + this.serverManager = new ServerManager(this, this); this.catalogTracker = new CatalogTracker(this.zooKeeper, this.connection, this, conf.getInt("hbase.master.catalog.timeout", Integer.MAX_VALUE)); @@ -366,7 +370,7 @@ implements HMasterInterface, HMasterRegionInterface, MasterServices, Server { boolean wasUp = this.clusterStatusTracker.isClusterUp(); if (!wasUp) this.clusterStatusTracker.setClusterUp(); - LOG.info("Server active/primary master; " + this.address + + LOG.info("Server active/primary master; " + this.serverName.toString() + ", sessionid=0x" + Long.toHexString(this.zooKeeper.getZooKeeper().getSessionId()) + ", cluster-up flag was=" + wasUp); @@ -377,8 +381,17 @@ implements HMasterInterface, HMasterRegionInterface, MasterServices, Server { // start up all service threads. startServiceThreads(); - // Wait for region servers to report in. Returns count of regions. - int regionCount = this.serverManager.waitForRegionServers(); + // Wait for region servers to report in. + this.serverManager.waitForRegionServers(); + // Check zk for regionservers that are up but didn't register + for (ServerName sn: this.regionServerTracker.getOnlineServers()) { + if (!this.serverManager.isServerOnline(sn)) { + // Not registered; add it. + LOG.info("Registering server found up in zk: " + sn); + this.serverManager.recordNewServer( + new InetSocketAddress(sn.getHostname(), sn.getPort()), sn); + } + } // TODO: Should do this in background rather than block master startup this.fileSystemManager. @@ -386,20 +399,8 @@ implements HMasterInterface, HMasterRegionInterface, MasterServices, Server { // Make sure root and meta assigned before proceeding. 
assignRootAndMeta(); - - // Is this fresh start with no regions assigned or are we a master joining - // an already-running cluster? If regionsCount == 0, then for sure a - // fresh start. TOOD: Be fancier. If regionsCount == 2, perhaps the - // 2 are .META. and -ROOT- and we should fall into the fresh startup - // branch below. For now, do processFailover. - if (regionCount == 0) { - LOG.info("Master startup proceeding: cluster startup"); - this.assignmentManager.cleanoutUnassigned(); - this.assignmentManager.assignAllUserRegions(); - } else { - LOG.info("Master startup proceeding: master failover"); - this.assignmentManager.processFailover(); - } + // Fixup assignment manager status + this.assignmentManager.joinCluster(); // Start balancer and meta catalog janitor after meta and regions have // been assigned. @@ -451,27 +452,15 @@ implements HMasterInterface, HMasterRegionInterface, MasterServices, Server { return assigned; } - /* - * @return This masters' address. - * @throws UnknownHostException - */ - private static String getMyAddress(final Configuration c) - throws UnknownHostException { - // Find out our address up in DNS. 
- String s = DNS.getDefaultHost(c.get("hbase.master.dns.interface","default"), - c.get("hbase.master.dns.nameserver","default")); - s += ":" + c.get(HConstants.MASTER_PORT, - Integer.toString(HConstants.DEFAULT_MASTER_PORT)); - return s; - } - - /** @return HServerAddress of the master server */ - public HServerAddress getMasterAddress() { - return this.address; - } - - public long getProtocolVersion(String protocol, long clientVersion) { - return HMasterInterface.VERSION; + @Override + public long getProtocolVersion(String protocol, long clientVersion) + throws IOException { + if (HMasterRegionInterface.class.getCanonicalName().equals(protocol)) { + return HMasterRegionInterface.VERSION; + } else if (HMasterInterface.class.getCanonicalName().equals(protocol)) { + return HMasterInterface.VERSION; + } + throw new IOException("Unknown protocol " + protocol); } /** @return InfoServer object. Maybe null.*/ @@ -601,25 +590,22 @@ implements HMasterInterface, HMasterRegionInterface, MasterServices, Server { } @Override - public MapWritable regionServerStartup(final HServerInfo serverInfo, - final long serverCurrentTime) + public MapWritable regionServerStartup(final int port, + final long serverStartCode, final long serverCurrentTime) throws IOException { - // Set the ip into the passed in serverInfo. Its ip is more than likely - // not the ip that the master sees here. See at end of this method where - // we pass it back to the regionserver by setting "hbase.regionserver.address" - // Everafter, the HSI combination 'server name' is what uniquely identifies - // the incoming RegionServer. - InetSocketAddress address = new InetSocketAddress( - HBaseServer.getRemoteIp().getHostName(), - serverInfo.getServerAddress().getPort()); - serverInfo.setServerAddress(new HServerAddress(address)); - + // This call to InetSocketAddress will resolve the hostname to an IP. 
+ InetSocketAddress isa = new InetSocketAddress( + HBaseServer.getRemoteIp().getHostName(), port); + if (isa.isUnresolved()) { + LOG.warn("Failed resolve of " + isa); + } // Register with server manager - this.serverManager.regionServerStartup(serverInfo, serverCurrentTime); + this.serverManager.regionServerStartup(isa, serverStartCode, + serverCurrentTime); // Send back some config info MapWritable mw = createConfigurationSubset(); - mw.put(new Text("hbase.regionserver.address"), - serverInfo.getServerAddress()); + mw.put(new Text(HConstants.KEY_FOR_HOSTNAME_SEEN_BY_MASTER), + new Text(this.serverName.getHostname())); return mw; } @@ -637,26 +623,6 @@ implements HMasterInterface, HMasterRegionInterface, MasterServices, Server { return mw; } - @Override - public HMsg [] regionServerReport(HServerInfo serverInfo, HMsg msgs[], - HRegionInfo[] mostLoadedRegions) - throws IOException { - return adornRegionServerAnswer(serverInfo, - this.serverManager.regionServerReport(serverInfo, msgs, mostLoadedRegions)); - } - - /** - * Override if you'd add messages to return to regionserver hsi - * or to send an exception. - * @param msgs Messages to add to - * @return Messages to return to - * @throws IOException exceptions that were injected for the region servers - */ - protected HMsg [] adornRegionServerAnswer(final HServerInfo hsi, - final HMsg [] msgs) throws IOException { - return msgs; - } - public boolean isMasterRunning() { return !isStopped(); } @@ -693,14 +659,13 @@ implements HMasterInterface, HMasterRegionInterface, MasterServices, Server { } } - Map> assignments = + Map> assignments = this.assignmentManager.getAssignments(); // Returned Map from AM does not include mention of servers w/o assignments. 
- for (Map.Entry e: + for (Map.Entry e: this.serverManager.getOnlineServers().entrySet()) { - HServerInfo hsi = e.getValue(); - if (!assignments.containsKey(hsi)) { - assignments.put(hsi, new ArrayList()); + if (!assignments.containsKey(e.getKey())) { + assignments.put(e.getKey(), new ArrayList()); } } List plans = this.balancer.balanceCluster(assignments); @@ -754,12 +719,12 @@ implements HMasterInterface, HMasterRegionInterface, MasterServices, Server { @Override public void move(final byte[] encodedRegionName, final byte[] destServerName) throws UnknownRegionException { - Pair p = + Pair p = this.assignmentManager.getAssignment(encodedRegionName); if (p == null) throw new UnknownRegionException(Bytes.toString(encodedRegionName)); HRegionInfo hri = p.getFirst(); - HServerInfo dest = null; + ServerName dest = null; if (destServerName == null || destServerName.length == 0) { LOG.info("Passed destination servername is null/empty so " + "choosing a server at random"); @@ -767,12 +732,12 @@ implements HMasterInterface, HMasterRegionInterface, MasterServices, Server { // Unassign will reassign it elsewhere choosing random server. this.assignmentManager.unassign(hri); } else { - dest = this.serverManager.getServerInfo(new String(destServerName)); - + dest = new ServerName(Bytes.toString(destServerName)); if (this.cpHost != null) { this.cpHost.preMove(p.getFirst(), p.getSecond(), dest); } RegionPlan rp = new RegionPlan(p.getFirst(), p.getSecond(), dest); + LOG.info("Added move plan " + rp + ", running balancer"); this.assignmentManager.balance(rp); if (this.cpHost != null) { this.cpHost.postMove(p.getFirst(), p.getSecond(), dest); @@ -851,7 +816,7 @@ implements HMasterInterface, HMasterRegionInterface, MasterServices, Server { } // 5. 
Trigger immediate assignment of the regions in round-robin fashion - List servers = serverManager.getOnlineServersList(); + List servers = serverManager.getOnlineServersList(); try { this.assignmentManager.assignUserRegions(Arrays.asList(newRegions), servers); } catch (InterruptedException ie) { @@ -955,11 +920,11 @@ implements HMasterInterface, HMasterRegionInterface, MasterServices, Server { * is found, but not currently deployed, the second element of the pair * may be null. */ - Pair getTableRegionForRow( + Pair getTableRegionForRow( final byte [] tableName, final byte [] rowKey) throws IOException { - final AtomicReference> result = - new AtomicReference>(null); + final AtomicReference> result = + new AtomicReference>(null); MetaScannerVisitor visitor = new MetaScannerVisitor() { @@ -968,13 +933,11 @@ implements HMasterInterface, HMasterRegionInterface, MasterServices, Server { if (data == null || data.size() <= 0) { return true; } - Pair pair = - MetaReader.metaRowToRegionPair(data); + Pair pair = MetaReader.metaRowToRegionPair(data); if (pair == null) { return false; } - if (!Bytes.equals(pair.getFirst().getTableDesc().getName(), - tableName)) { + if (!Bytes.equals(pair.getFirst().getTableDesc().getName(), tableName)) { return false; } result.set(pair); @@ -1025,7 +988,7 @@ implements HMasterInterface, HMasterRegionInterface, MasterServices, Server { public ClusterStatus getClusterStatus() { ClusterStatus status = new ClusterStatus(); status.setHBaseVersion(VersionInfo.getVersion()); - status.setServerInfo(serverManager.getOnlineServers().values()); + status.setServers(serverManager.getOnlineServers().keySet()); status.setDeadServers(serverManager.getDeadServers()); status.setRegionsInTransition(assignmentManager.getRegionsInTransition()); return status; @@ -1049,8 +1012,8 @@ implements HMasterInterface, HMasterRegionInterface, MasterServices, Server { } @Override - public String getServerName() { - return address.toString(); + public ServerName 
getServerName() { + return this.serverName; } @Override @@ -1140,7 +1103,7 @@ implements HMasterInterface, HMasterRegionInterface, MasterServices, Server { return; } } - Pair pair = + Pair pair = MetaReader.getRegion(this.catalogTracker, regionName); if (pair == null) throw new UnknownRegionException(Bytes.toString(regionName)); assignRegion(pair.getFirst()); @@ -1161,7 +1124,7 @@ implements HMasterInterface, HMasterRegionInterface, MasterServices, Server { return; } } - Pair pair = + Pair pair = MetaReader.getRegion(this.catalogTracker, regionName); if (pair == null) throw new UnknownRegionException(Bytes.toString(regionName)); HRegionInfo hri = pair.getFirst(); @@ -1173,6 +1136,16 @@ implements HMasterInterface, HMasterRegionInterface, MasterServices, Server { } /** + * Compute the average load across all region servers. + * Currently, this uses a very naive computation - just uses the number of + * regions being served, ignoring stats about number of requests. + * @return the average load + */ + public double getAverageLoad() { + return this.assignmentManager.getAverageLoad(); + } + + /** * Utility for constructing an instance of the passed HMaster class. 
* @param masterClass * @param conf @@ -1197,11 +1170,10 @@ implements HMasterInterface, HMasterRegionInterface, MasterServices, Server { } } - /** * @see org.apache.hadoop.hbase.master.HMasterCommandLine */ public static void main(String [] args) throws Exception { new HMasterCommandLine(HMaster.class).doMain(args); } -} +} \ No newline at end of file diff --git a/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java b/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java index 3d102c1..fa4c8e8 100644 --- a/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java +++ b/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java @@ -24,6 +24,7 @@ import java.util.ArrayList; import java.util.Comparator; import java.util.List; import java.util.Map; +import java.util.NavigableMap; import java.util.NavigableSet; import java.util.Random; import java.util.TreeMap; @@ -36,8 +37,7 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.HServerAddress; -import org.apache.hadoop.hbase.HServerInfo; +import org.apache.hadoop.hbase.ServerName; /** * Makes decisions about the placement and movement of Regions across @@ -60,6 +60,25 @@ public class LoadBalancer { private static final Random rand = new Random(); /** + * Data structure that holds servername and 'load'. + */ + static class ServerAndLoad implements Comparator { + private final ServerName sn; + private final int load; + ServerAndLoad(final ServerName sn, final int load) { + this.sn = sn; + this.load = load; + } + @Override + public int compare(ServerAndLoad left, ServerAndLoad right) { + return right.load - left.load; + } + + ServerName getServerName() {return this.sn;} + int getLoad() {return this.load;} + } + + /** * Generate a global load balancing plan according to the specified map of * server information to the most loaded regions of each server. 
* @@ -128,38 +147,33 @@ public class LoadBalancer { * or null if cluster is already balanced */ public List balanceCluster( - Map> clusterState) { + Map> clusterState) { long startTime = System.currentTimeMillis(); - - // Make a map sorted by load and count regions - TreeMap> serversByLoad = - new TreeMap>( - new HServerInfo.LoadComparator()); int numServers = clusterState.size(); if (numServers == 0) { LOG.debug("numServers=0 so skipping load balancing"); return null; } + NavigableMap> serversByLoad = + new TreeMap>(); int numRegions = 0; // Iterate so we can count regions as we build the map - for(Map.Entry> server: - clusterState.entrySet()) { - server.getKey().getLoad().setNumberOfRegions(server.getValue().size()); - numRegions += server.getKey().getLoad().getNumberOfRegions(); - serversByLoad.put(server.getKey(), server.getValue()); + for(Map.Entry> server: clusterState.entrySet()) { + List regions = server.getValue(); + serversByLoad.put(new ServerAndLoad(server.getKey(), regions.size()), regions); } - // Check if we even need to do any load balancing float average = (float)numRegions / numServers; // for logging int min = numRegions / numServers; int max = numRegions % numServers == 0 ? min : min + 1; - if(serversByLoad.lastKey().getLoad().getNumberOfRegions() <= max && - serversByLoad.firstKey().getLoad().getNumberOfRegions() >= min) { + if (serversByLoad.lastKey().getLoad() <= max && + serversByLoad.firstKey().getLoad() >= min) { // Skipped because no server outside (min,max) range - LOG.info("Skipping load balancing. 
servers=" + numServers + " " + - "regions=" + numRegions + " average=" + average + " " + - "mostloaded=" + serversByLoad.lastKey().getLoad().getNumberOfRegions() + - " leastloaded=" + serversByLoad.lastKey().getLoad().getNumberOfRegions()); + LOG.info("Skipping load balancing because balanced cluster; " + + "servers=" + numServers + " " + + "regions=" + numRegions + " average=" + average + " " + + "mostloaded=" + serversByLoad.lastKey().getLoad() + + " leastloaded=" + serversByLoad.lastKey().getLoad()); return null; } @@ -170,14 +184,14 @@ public class LoadBalancer { // Walk down most loaded, pruning each to the max int serversOverloaded = 0; - Map serverBalanceInfo = - new TreeMap(); - for(Map.Entry> server : + Map serverBalanceInfo = + new TreeMap(); + for(Map.Entry> server: serversByLoad.descendingMap().entrySet()) { - HServerInfo serverInfo = server.getKey(); - int regionCount = serverInfo.getLoad().getNumberOfRegions(); - if(regionCount <= max) { - serverBalanceInfo.put(serverInfo, new BalanceInfo(0, 0)); + ServerAndLoad sal = server.getKey(); + int regionCount = sal.getLoad(); + if (regionCount <= max) { + serverBalanceInfo.put(sal.getServerName(), new BalanceInfo(0, 0)); break; } serversOverloaded++; @@ -187,20 +201,20 @@ public class LoadBalancer { for (HRegionInfo hri: regions) { // Don't rebalance meta regions. 
if (hri.isMetaRegion()) continue; - regionsToMove.add(new RegionPlan(hri, serverInfo, null)); + regionsToMove.add(new RegionPlan(hri, sal.getServerName(), null)); numTaken++; if (numTaken >= numToOffload) break; } - serverBalanceInfo.put(serverInfo, - new BalanceInfo(numToOffload, (-1)*numTaken)); + serverBalanceInfo.put(sal.getServerName(), + new BalanceInfo(numToOffload, (-1)*numTaken)); } // Walk down least loaded, filling each to the min int serversUnderloaded = 0; // number of servers that get new regions int neededRegions = 0; // number of regions needed to bring all up to min - for(Map.Entry> server : + for(Map.Entry> server : serversByLoad.entrySet()) { - int regionCount = server.getKey().getLoad().getNumberOfRegions(); + int regionCount = server.getKey().getLoad(); if(regionCount >= min) { break; } @@ -208,11 +222,11 @@ public class LoadBalancer { int numToTake = min - regionCount; int numTaken = 0; while(numTaken < numToTake && regionidx < regionsToMove.size()) { - regionsToMove.get(regionidx).setDestination(server.getKey()); + regionsToMove.get(regionidx).setDestination(server.getKey().getServerName()); numTaken++; regionidx++; } - serverBalanceInfo.put(server.getKey(), new BalanceInfo(0, numTaken)); + serverBalanceInfo.put(server.getKey().getServerName(), new BalanceInfo(0, numTaken)); // If we still want to take some, increment needed if(numTaken < numToTake) { neededRegions += (numToTake - numTaken); @@ -236,7 +250,7 @@ public class LoadBalancer { // If we need more to fill min, grab one from each most loaded until enough if (neededRegions != 0) { // Walk down most loaded, grabbing one from each until we get enough - for(Map.Entry> server : + for(Map.Entry> server : serversByLoad.descendingMap().entrySet()) { BalanceInfo balanceInfo = serverBalanceInfo.get(server.getKey()); int idx = @@ -244,7 +258,7 @@ public class LoadBalancer { if (idx >= server.getValue().size()) break; HRegionInfo region = server.getValue().get(idx); if (region.isMetaRegion()) 
continue; // Don't move meta regions. - regionsToMove.add(new RegionPlan(region, server.getKey(), null)); + regionsToMove.add(new RegionPlan(region, server.getKey().getServerName(), null)); if(--neededRegions == 0) { // No more regions needed, done shedding break; @@ -256,9 +270,9 @@ public class LoadBalancer { // Assign each underloaded up to the min, then if leftovers, assign to max // Walk down least loaded, assigning to each to fill up to min - for(Map.Entry> server : + for(Map.Entry> server : serversByLoad.entrySet()) { - int regionCount = server.getKey().getLoad().getNumberOfRegions(); + int regionCount = server.getKey().getLoad(); if (regionCount >= min) break; BalanceInfo balanceInfo = serverBalanceInfo.get(server.getKey()); if(balanceInfo != null) { @@ -270,7 +284,7 @@ public class LoadBalancer { int numToTake = min - regionCount; int numTaken = 0; while(numTaken < numToTake && regionidx < regionsToMove.size()) { - regionsToMove.get(regionidx).setDestination(server.getKey()); + regionsToMove.get(regionidx).setDestination(server.getKey().getServerName()); numTaken++; regionidx++; } @@ -278,13 +292,13 @@ public class LoadBalancer { // If we still have regions to dish out, assign underloaded to max if(regionidx != regionsToMove.size()) { - for(Map.Entry> server : + for(Map.Entry> server : serversByLoad.entrySet()) { - int regionCount = server.getKey().getLoad().getNumberOfRegions(); + int regionCount = server.getKey().getLoad(); if(regionCount >= max) { break; } - regionsToMove.get(regionidx).setDestination(server.getKey()); + regionsToMove.get(regionidx).setDestination(server.getKey().getServerName()); regionidx++; if(regionidx == regionsToMove.size()) { break; @@ -300,9 +314,9 @@ public class LoadBalancer { ", numServers=" + numServers + ", serversOverloaded=" + serversOverloaded + ", serversUnderloaded=" + serversUnderloaded); StringBuilder sb = new StringBuilder(); - for (Map.Entry> e: clusterState.entrySet()) { + for (Map.Entry> e: 
clusterState.entrySet()) { if (sb.length() > 0) sb.append(", "); - sb.append(e.getKey().getServerName()); + sb.append(e.getKey().toString()); sb.append(" "); sb.append(e.getValue().size()); } @@ -322,11 +336,6 @@ public class LoadBalancer { * Stores additional per-server information about the regions added/removed * during the run of the balancing algorithm. * - * For servers that receive additional regions, we are not updating the number - * of regions in HServerInfo once we decide to reassign regions to a server, - * but we need this information later in the algorithm. This is stored in - * numRegionsAdded. - * * For servers that shed regions, we need to track which regions we have * already shed. nextRegionForUnload contains the index in the list * of regions on the server that is the next to be shed. @@ -367,13 +376,13 @@ public class LoadBalancer { * @return map of server to the regions it should take, or null if no * assignment is possible (ie. no regions or no servers) */ - public static Map> roundRobinAssignment( - List regions, List servers) { + public static Map> roundRobinAssignment( + List regions, List servers) { if(regions.size() == 0 || servers.size() == 0) { return null; } - Map> assignments = - new TreeMap>(); + Map> assignments = + new TreeMap>(); int numRegions = regions.size(); int numServers = servers.size(); int max = (int)Math.ceil((float)numRegions/numServers); @@ -383,7 +392,7 @@ public class LoadBalancer { } int regionIdx = 0; for (int j = 0; j < numServers; j++) { - HServerInfo server = servers.get((j+serverIdx) % numServers); + ServerName server = servers.get((j + serverIdx) % numServers); List serverRegions = new ArrayList(max); for (int i=regionIdx; i> retainAssignment( - Map regions, List servers) { - Map> assignments = - new TreeMap>(); - // Build a map of server addresses to server info so we can match things up - Map serverMap = - new TreeMap(); - for (HServerInfo server : servers) { - serverMap.put(server.getServerAddress(), 
server); - assignments.put(server, new ArrayList()); - } - for (Map.Entry region : regions.entrySet()) { - HServerAddress hsa = region.getValue(); - HServerInfo server = hsa == null? null: serverMap.get(hsa); - if (server != null) { - assignments.get(server).add(region.getKey()); + public static Map> retainAssignment( + Map regions, List servers) { + Map> assignments = + new TreeMap>(); + for (Map.Entry region : regions.entrySet()) { + ServerName sn = region.getValue(); + if (servers.contains(sn)) { + assignments.get(sn).add(region.getKey()); } else { - assignments.get(servers.get(rand.nextInt(assignments.size()))).add( - region.getKey()); + assignments.get(servers.get(rand.nextInt(assignments.size()))).add(region.getKey()); } } return assignments; @@ -553,17 +553,17 @@ public class LoadBalancer { * @param servers * @return map of regions to the server it should be assigned to */ - public static Map immediateAssignment( - List regions, List servers) { - Map assignments = - new TreeMap(); + public static Map immediateAssignment( + List regions, List servers) { + Map assignments = + new TreeMap(); for(HRegionInfo region : regions) { assignments.put(region, servers.get(rand.nextInt(servers.size()))); } return assignments; } - public static HServerInfo randomAssignment(List servers) { + public static ServerName randomAssignment(List servers) { if (servers == null || servers.isEmpty()) { LOG.warn("Wanted to do random assignment but no servers to assign to"); return null; @@ -583,21 +583,21 @@ public class LoadBalancer { */ public static class RegionPlan implements Comparable { private final HRegionInfo hri; - private final HServerInfo source; - private HServerInfo dest; + private final ServerName source; + private ServerName dest; /** * Instantiate a plan for a region move, moving the specified region from * the specified source server to the specified destination server. 
* * Destination server can be instantiated as null and later set - * with {@link #setDestination(HServerInfo)}. + * with {@link #setDestination(ServerName)}. * * @param hri region to be moved * @param source regionserver region should be moved from * @param dest regionserver region should be moved to */ - public RegionPlan(final HRegionInfo hri, HServerInfo source, HServerInfo dest) { + public RegionPlan(final HRegionInfo hri, ServerName source, ServerName dest) { this.hri = hri; this.source = source; this.dest = dest; @@ -606,7 +606,7 @@ public class LoadBalancer { /** * Set the destination server for the plan for this region. */ - public void setDestination(HServerInfo dest) { + public void setDestination(ServerName dest) { this.dest = dest; } @@ -614,7 +614,7 @@ public class LoadBalancer { * Get the source server for the plan for this region. * @return server info for source */ - public HServerInfo getSource() { + public ServerName getSource() { return source; } @@ -622,7 +622,7 @@ public class LoadBalancer { * Get the destination server for the plan for this region. * @return server info for destination */ - public HServerInfo getDestination() { + public ServerName getDestination() { return dest; } @@ -650,8 +650,8 @@ public class LoadBalancer { @Override public String toString() { return "hri=" + this.hri.getRegionNameAsString() + ", src=" + - (this.source == null? "": this.source.getServerName()) + - ", dest=" + (this.dest == null? "": this.dest.getServerName()); + (this.source == null? "": this.source.toString()) + + ", dest=" + (this.dest == null? 
"": this.dest.toString()); } } -} +} \ No newline at end of file diff --git a/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java b/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java index d4c9872..0ebea74 100644 --- a/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java +++ b/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java @@ -335,7 +335,7 @@ public class MasterCoprocessorHost } } - void preMove(final HRegionInfo region, final HServerInfo srcServer, final HServerInfo destServer) + void preMove(final HRegionInfo region, final ServerName srcServer, final ServerName destServer) throws UnknownRegionException { try { coprocessorLock.readLock().lock(); @@ -353,7 +353,7 @@ public class MasterCoprocessorHost } } - void postMove(final HRegionInfo region, final HServerInfo srcServer, final HServerInfo destServer) + void postMove(final HRegionInfo region, final ServerName srcServer, final ServerName destServer) throws UnknownRegionException { try { coprocessorLock.readLock().lock(); diff --git a/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java b/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java index 79ac17b..b32e46b 100644 --- a/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java +++ b/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java @@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.master; import java.io.IOException; +import java.net.InetSocketAddress; import java.util.Map; import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; @@ -33,9 +34,9 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.HServerInfo; import org.apache.hadoop.hbase.RemoteExceptionHandler; import org.apache.hadoop.hbase.Server; +import 
org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.master.metrics.MasterMetrics; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.Store; @@ -151,9 +152,9 @@ public class MasterFileSystem { * Inspect the log directory to recover any log file without * an active region server. * @param onlineServers Map of online servers keyed by - * {@link HServerInfo#getServerName()} + * {@link ServerName} */ - void splitLogAfterStartup(final Map onlineServers) { + void splitLogAfterStartup(final Map onlineServers) { Path logsDirPath = new Path(this.rootdir, HConstants.HREGION_LOGDIR_NAME); try { if (!this.fs.exists(logsDirPath)) { @@ -173,7 +174,7 @@ public class MasterFileSystem { return; } for (FileStatus status : logFolders) { - String serverName = status.getPath().getName(); + ServerName serverName = new ServerName(status.getPath().getName()); if (onlineServers.get(serverName) == null) { LOG.info("Log folder " + status.getPath() + " doesn't belong " + "to a known region server, splitting"); @@ -185,10 +186,11 @@ public class MasterFileSystem { } } - public void splitLog(final String serverName) { + public void splitLog(final ServerName serverName) { this.splitLogLock.lock(); long splitTime = 0, splitLogSize = 0; - Path logDir = new Path(this.rootdir, HLog.getHLogDirectoryName(serverName)); + Path logDir = + new Path(this.rootdir, HLog.getHLogDirectoryName(serverName.toString())); try { HLogSplitter splitter = HLogSplitter.createLogSplitter( conf, rootdir, logDir, oldLogDir, this.fs); diff --git a/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java b/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java index 4d921da..71c7528 100644 --- a/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java +++ b/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java @@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.master; import java.io.IOException; +import java.net.InetSocketAddress; 
import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; @@ -32,13 +33,11 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ClockOutOfSyncException; -import org.apache.hadoop.hbase.HMsg; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HServerAddress; -import org.apache.hadoop.hbase.HServerInfo; -import org.apache.hadoop.hbase.HServerLoad; import org.apache.hadoop.hbase.PleaseHoldException; import org.apache.hadoop.hbase.Server; +import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.YouAreDeadException; import org.apache.hadoop.hbase.catalog.CatalogTracker; import org.apache.hadoop.hbase.client.HConnection; @@ -47,12 +46,10 @@ import org.apache.hadoop.hbase.client.RetriesExhaustedException; import org.apache.hadoop.hbase.ipc.HRegionInterface; import org.apache.hadoop.hbase.master.handler.MetaServerShutdownHandler; import org.apache.hadoop.hbase.master.handler.ServerShutdownHandler; -import org.apache.hadoop.hbase.master.metrics.MasterMetrics; import org.apache.hadoop.hbase.regionserver.Leases.LeaseStillHeldException; /** - * The ServerManager class manages info about region servers - HServerInfo, - * load numbers, dying servers, etc. + * The ServerManager class manages info about region servers. *

* Maintains lists of online and dead servers. Processes the startups, * shutdowns, and deaths of region servers. @@ -70,23 +67,20 @@ public class ServerManager { // Set if we are to shutdown the cluster. private volatile boolean clusterShutdown = false; - /** The map of known server names to server info */ - private final Map onlineServers = - new ConcurrentHashMap(); + /** Map of registered servers */ + private final Map onlineServers = + new ConcurrentHashMap(); // TODO: This is strange to have two maps but HSI above is used on both sides /** * Map from full server-instance name to the RPC connection for this server. */ - private final Map serverConnections = - new HashMap(); + private final Map serverConnections = + new HashMap(); private final Server master; private final MasterServices services; - // Reporting to track master metrics. - private final MasterMetrics metrics; - private final DeadServer deadservers; private final long maxSkew; @@ -97,11 +91,9 @@ public class ServerManager { * @param services * @param metrics */ - public ServerManager(final Server master, final MasterServices services, - MasterMetrics metrics) { + public ServerManager(final Server master, final MasterServices services) { this.master = master; this.services = services; - this.metrics = metrics; Configuration c = master.getConfiguration(); maxSkew = c.getLong("hbase.master.maxclockskew", 30000); this.deadservers = @@ -110,11 +102,13 @@ public class ServerManager { /** * Let the server manager know a new regionserver has come online - * @param serverInfo + * @param isa + * @param serverStartcode * @param serverCurrentTime The current time of the region server in ms * @throws IOException */ - void regionServerStartup(final HServerInfo serverInfo, long serverCurrentTime) + void regionServerStartup(final InetSocketAddress isa, + final long serverStartcode, long serverCurrentTime) throws IOException { // Test for case where we get a region startup message from a regionserver // that has 
been quickly restarted but whose znode expiration handler has @@ -123,41 +117,41 @@ public class ServerManager { // is, reject the server and trigger its expiration. The next time it comes // in, it should have been removed from serverAddressToServerInfo and queued // for processing by ProcessServerShutdown. - HServerInfo info = new HServerInfo(serverInfo); - checkIsDead(info.getServerName(), "STARTUP"); - checkAlreadySameHostPort(info); - checkClockSkew(info, serverCurrentTime); - recordNewServer(info, false, null); + ServerName sn = new ServerName(isa.getHostName(), isa.getPort(), serverStartcode); + checkIsDead(sn, "STARTUP"); + checkAlreadySameHostPort(sn); + checkClockSkew(sn, serverCurrentTime); + recordNewServer(isa, sn); } /** * Test to see if we have a server of same host and port already. - * @param serverInfo + * @param serverName * @throws PleaseHoldException */ - void checkAlreadySameHostPort(final HServerInfo serverInfo) + void checkAlreadySameHostPort(final ServerName serverName) throws PleaseHoldException { - String hostAndPort = serverInfo.getServerAddress().toString(); - HServerInfo existingServer = - haveServerWithSameHostAndPortAlready(serverInfo.getHostnamePort()); + ServerName existingServer = haveServerWithSameHostAndPortAlready(serverName); if (existingServer != null) { - String message = "Server start rejected; we already have " + hostAndPort + - " registered; existingServer=" + existingServer + ", newServer=" + serverInfo; + String message = "Server serverName=" + serverName + + " rejected; we already have " + existingServer.toString() + + " registered with same hostname and port"; LOG.info(message); - if (existingServer.getStartCode() < serverInfo.getStartCode()) { + if (existingServer.getStartcode() < serverName.getStartcode()) { LOG.info("Triggering server recovery; existingServer " + - existingServer.getServerName() + " looks stale"); + existingServer + " looks stale"); expireServer(existingServer); } throw new 
PleaseHoldException(message); } } - private HServerInfo haveServerWithSameHostAndPortAlready(final String hostnamePort) { + private ServerName haveServerWithSameHostAndPortAlready(final ServerName serverName) { synchronized (this.onlineServers) { - for (Map.Entry e: this.onlineServers.entrySet()) { - if (e.getValue().getHostnamePort().equals(hostnamePort)) { - return e.getValue(); + for (Map.Entry e: this.onlineServers.entrySet()) { + if (e.getKey().getHostname().equals(serverName.getHostname()) && + e.getKey().getPort() == serverName.getPort()) { + return e.getKey(); } } } @@ -167,14 +161,16 @@ public class ServerManager { /** * Checks if the clock skew between the server and the master. If the clock * skew is too much it will throw an Exception. + * @param serverName Incoming servers's name + * @param serverCurrentTime * @throws ClockOutOfSyncException */ - private void checkClockSkew(final HServerInfo serverInfo, + private void checkClockSkew(final ServerName serverName, final long serverCurrentTime) throws ClockOutOfSyncException { long skew = System.currentTimeMillis() - serverCurrentTime; if (skew > maxSkew) { - String message = "Server " + serverInfo.getServerName() + " has been " + + String message = "Server " + serverName + " has been " + "rejected; Reported time is too far out of sync with master. " + "Time difference of " + skew + "ms > max allowed of " + maxSkew + "ms"; LOG.warn(message); @@ -184,193 +180,28 @@ public class ServerManager { /** * If this server is on the dead list, reject it with a LeaseStillHeldException - * @param serverName Server name formatted as host_port_startcode. 
+ * @param serverName * @param what START or REPORT * @throws LeaseStillHeldException */ - private void checkIsDead(final String serverName, final String what) + private void checkIsDead(final ServerName serverName, final String what) throws YouAreDeadException { - if (!this.deadservers.isDeadServer(serverName)) return; + if (!this.deadservers.isDeadServer(serverName.toString())) return; String message = "Server " + what + " rejected; currently processing " + - serverName + " as dead server"; + serverName.toString() + " as dead server"; LOG.debug(message); throw new YouAreDeadException(message); } /** - * Adds the HSI to the RS list - * @param info The region server informations - * @param useInfoLoad True if the load from the info should be used; e.g. - * under a master failover - * @param hri Region interface. Can be null. + * Adds the onlineServers list. + * @param isa + * @param serverName The remote servers name. */ - void recordNewServer(HServerInfo info, boolean useInfoLoad, - HRegionInterface hri) { - HServerLoad load = useInfoLoad? info.getLoad(): new HServerLoad(); - String serverName = info.getServerName(); - LOG.info("Registering server=" + serverName + ", regionCount=" + - load.getLoad() + ", userLoad=" + useInfoLoad); - info.setLoad(load); - // TODO: Why did we update the RS location ourself? Shouldn't RS do this? - // masterStatus.getZooKeeper().updateRSLocationGetWatch(info, watcher); - // -- If I understand the question, the RS does not update the location - // because could be disagreement over locations because of DNS issues; only - // master does DNS now -- St.Ack 20100929. - this.onlineServers.put(serverName, info); - if (hri == null) { - serverConnections.remove(serverName); - } else { - serverConnections.put(serverName, hri); - } - } - - /** - * Called to process the messages sent from the region server to the master - * along with the heart beat. 
- * - * @param serverInfo - * @param msgs - * @param mostLoadedRegions Array of regions the region server is submitting - * as candidates to be rebalanced, should it be overloaded - * @return messages from master to region server indicating what region - * server should do. - * - * @throws IOException - */ - HMsg [] regionServerReport(final HServerInfo serverInfo, - final HMsg [] msgs, final HRegionInfo[] mostLoadedRegions) - throws IOException { - // Be careful. This method does returns in the middle. - HServerInfo info = new HServerInfo(serverInfo); - - // Check if dead. If it is, it'll get a 'You Are Dead!' exception. - checkIsDead(info.getServerName(), "REPORT"); - - // If we don't know this server, tell it shutdown. - HServerInfo storedInfo = this.onlineServers.get(info.getServerName()); - if (storedInfo == null) { - // Maybe we already have this host+port combo and its just different - // start code? - checkAlreadySameHostPort(info); - // Just let the server in. Presume master joining a running cluster. - // recordNewServer is what happens at the end of reportServerStartup. - // The only thing we are skipping is passing back to the regionserver - // the HServerInfo to use. Here we presume a master has already done - // that so we'll press on with whatever it gave us for HSI. - recordNewServer(info, true, null); - // If msgs, put off their processing but this is not enough because - // its possible that the next time the server reports in, we'll still - // not be up and serving. For example, if a split, we'll need the - // regions and servers setup in the master before the below - // handleSplitReport will work. TODO: FIx!! - if (msgs.length > 0) - throw new PleaseHoldException("FIX! 
Putting off " + - "message processing because not yet rwady but possible we won't be " + - "ready next on next report"); - } - - // Check startcodes - if (raceThatShouldNotHappenAnymore(storedInfo, info)) { - return HMsg.STOP_REGIONSERVER_ARRAY; - } - - for (HMsg msg: msgs) { - LOG.info("Received " + msg + " from " + serverInfo.getServerName()); - switch (msg.getType()) { - case REGION_SPLIT: - this.services.getAssignmentManager().handleSplitReport(serverInfo, - msg.getRegionInfo(), msg.getDaughterA(), msg.getDaughterB()); - break; - - default: - LOG.error("Unhandled msg type " + msg); - } - } - - HMsg [] reply = null; - int numservers = countOfRegionServers(); - if (this.clusterShutdown) { - if (numservers <= 2) { - // Shutdown needs to be staggered; the meta regions need to close last - // in case they need to be updated during the close melee. If <= 2 - // servers left, then these are the two that were carrying root and meta - // most likely (TODO: This presumes unsplittable meta -- FIX). Tell - // these servers can shutdown now too. - reply = HMsg.STOP_REGIONSERVER_ARRAY; - } - } - return processRegionServerAllsWell(info, mostLoadedRegions, reply); - } - - private boolean raceThatShouldNotHappenAnymore(final HServerInfo storedInfo, - final HServerInfo reportedInfo) { - if (storedInfo.getStartCode() != reportedInfo.getStartCode()) { - // TODO: I don't think this possible any more. We check startcodes when - // server comes in on regionServerStartup -- St.Ack - // This state is reachable if: - // 1) RegionServer A started - // 2) RegionServer B started on the same machine, then clobbered A in regionServerStartup. - // 3) RegionServer A returns, expecting to work as usual. - // The answer is to ask A to shut down for good. 
- LOG.warn("Race condition detected: " + reportedInfo.getServerName()); - synchronized (this.onlineServers) { - removeServerInfo(reportedInfo.getServerName()); - notifyOnlineServers(); - } - return true; - } - return false; - } - - /** - * RegionServer is checking in, no exceptional circumstances - * @param serverInfo - * @param mostLoadedRegions - * @param msgs - * @return - * @throws IOException - */ - private HMsg[] processRegionServerAllsWell(HServerInfo serverInfo, - final HRegionInfo[] mostLoadedRegions, HMsg[] msgs) - throws IOException { - // Refresh the info object and the load information - this.onlineServers.put(serverInfo.getServerName(), serverInfo); - HServerLoad load = serverInfo.getLoad(); - if (load != null && this.metrics != null) { - this.metrics.incrementRequests(load.getNumberOfRequests()); - } - // No more piggyback messages on heartbeats for other stuff - return msgs; - } - - /** - * @param serverName - * @return True if we removed server from the list. - */ - private boolean removeServerInfo(final String serverName) { - HServerInfo info = this.onlineServers.remove(serverName); - if (info != null) { - return true; - } - return false; - } - - /** - * Compute the average load across all region servers. - * Currently, this uses a very naive computation - just uses the number of - * regions being served, ignoring stats about number of requests. 
- * @return the average load - */ - public double getAverageLoad() { - int totalLoad = 0; - int numServers = 0; - double averageLoad = 0.0; - for (HServerInfo hsi : onlineServers.values()) { - numServers++; - totalLoad += hsi.getLoad().getNumberOfRegions(); - } - averageLoad = (double)totalLoad / (double)numServers; - return averageLoad; + void recordNewServer(final InetSocketAddress isa, final ServerName serverName) { + LOG.info("Registering server=" + serverName); + this.onlineServers.put(serverName, isa); + this.serverConnections.remove(serverName); } /** @return the count of active regionservers */ @@ -380,17 +211,9 @@ public class ServerManager { } /** - * @param name server name - * @return HServerInfo for the given server address - */ - public HServerInfo getServerInfo(String name) { - return this.onlineServers.get(name); - } - - /** * @return Read-only map of servers to serverinfo */ - public Map getOnlineServers() { + public Map getOnlineServers() { // Presumption is that iterating the returned Map is OK. synchronized (this.onlineServers) { return Collections.unmodifiableMap(this.onlineServers); @@ -409,40 +232,11 @@ public class ServerManager { return this.deadservers.areDeadServersInProgress(); } - /** - * @param hsa - * @return The HServerInfo whose HServerAddress is hsa or null - * if nothing found. - */ - public HServerInfo getHServerInfo(final HServerAddress hsa) { - synchronized(this.onlineServers) { - // TODO: This is primitive. Do a better search. - for (Map.Entry e: this.onlineServers.entrySet()) { - if (e.getValue().getServerAddress().equals(hsa)) { - return e.getValue(); - } - } - } - return null; - } - - private void notifyOnlineServers() { - synchronized (this.onlineServers) { - this.onlineServers.notifyAll(); - } - } - - /* - * Wait on regionservers to report in - * with {@link #regionServerReport(HServerInfo, HMsg[])} so they get notice - * the master is going down. Waits until all region servers come back with - * a MSG_REGIONSERVER_STOP. 
- */ void letRegionServersShutdown() { synchronized (onlineServers) { while (onlineServers.size() > 0) { StringBuilder sb = new StringBuilder(); - for (String key: this.onlineServers.keySet()) { + for (ServerName key: this.onlineServers.keySet()) { if (sb.length() > 0) { sb.append(", "); } @@ -462,32 +256,29 @@ public class ServerManager { * Expire the passed server. Add it to list of deadservers and queue a * shutdown processing. */ - public synchronized void expireServer(final HServerInfo hsi) { - // First check a server to expire. ServerName is of the form: - // , , - String serverName = hsi.getServerName(); - HServerInfo info = this.onlineServers.get(serverName); - if (info == null) { - LOG.warn("Received expiration of " + hsi.getServerName() + + public synchronized void expireServer(final ServerName serverName) { + InetSocketAddress serverAddress = this.onlineServers.get(serverName); + if (serverAddress == null) { + LOG.warn("Received expiration of " + serverName + " but server is not currently online"); return; } if (this.deadservers.contains(serverName)) { // TODO: Can this happen? It shouldn't be online in this case? - LOG.warn("Received expiration of " + hsi.getServerName() + + LOG.warn("Received expiration of " + serverName + " but server shutdown is already in progress"); return; } // Remove the server from the known servers lists and update load info BUT // add to deadservers first; do this so it'll show in dead servers list if // not in online servers list. 
- this.deadservers.add(serverName); + this.deadservers.add(serverName.toString()); this.onlineServers.remove(serverName); this.serverConnections.remove(serverName); // If cluster is going down, yes, servers are going to be expiring; don't // process as a dead server if (this.clusterShutdown) { - LOG.info("Cluster shutdown set; " + hsi.getServerName() + + LOG.info("Cluster shutdown set; " + serverName + " expired; onlineServers=" + this.onlineServers.size()); if (this.onlineServers.isEmpty()) { master.stop("Cluster shutdown set; onlineServer=0"); @@ -498,9 +289,8 @@ public class ServerManager { // Was this server carrying root? boolean carryingRoot; try { - HServerAddress address = ct.getRootLocation(); - carryingRoot = address != null && - hsi.getServerAddress().equals(address); + ServerName address = ct.getRootLocation(); + carryingRoot = isSameAddress(address, serverAddress); } catch (InterruptedException e) { Thread.currentThread().interrupt(); LOG.info("Interrupted"); @@ -511,21 +301,32 @@ public class ServerManager { // run into fact that meta is dead). I can ask assignment manager. It // has an inmemory list of who has what. This list will be cleared as we // process the dead server but should be find asking it now. 
- HServerAddress address = ct.getMetaLocation(); - boolean carryingMeta = - address != null && hsi.getServerAddress().equals(address); + ServerName address = ct.getMetaLocation(); + boolean carryingMeta = isSameAddress(address, serverAddress); if (carryingRoot || carryingMeta) { this.services.getExecutorService().submit(new MetaServerShutdownHandler(this.master, - this.services, this.deadservers, info, carryingRoot, carryingMeta)); + this.services, this.deadservers, serverName, carryingRoot, carryingMeta)); } else { this.services.getExecutorService().submit(new ServerShutdownHandler(this.master, - this.services, this.deadservers, info)); + this.services, this.deadservers, serverName)); } LOG.debug("Added=" + serverName + " to dead servers, submitted shutdown handler to be executed, root=" + carryingRoot + ", meta=" + carryingMeta); } + private static boolean isSameAddress(final ServerName sn, + final InetSocketAddress serverAddress) { + if (sn == null) return false; + return isSameAddress(sn.getHostname(), sn.getPort(), serverAddress); + } + + private static boolean isSameAddress(final String hostname, final int port, + final InetSocketAddress serverAddress) { + return hostname.equals(serverAddress.getAddress().getHostAddress()) && + port == serverAddress.getPort(); + } + // RPC methods to region servers /** @@ -536,12 +337,12 @@ public class ServerManager { * @param server server to open a region * @param region region to open */ - public void sendRegionOpen(HServerInfo server, HRegionInfo region) + public void sendRegionOpen(final ServerName server, HRegionInfo region) throws IOException { HRegionInterface hri = getServerConnection(server); if (hri == null) { - LOG.warn("Attempting to send OPEN RPC to server " + server.getServerName() - + " failed because no RPC connection found to this server"); + LOG.warn("Attempting to send OPEN RPC to server " + server.toString() + + " failed because no RPC connection found to this server"); return; } hri.openRegion(region); 
@@ -555,12 +356,12 @@ public class ServerManager { * @param server server to open a region * @param regions regions to open */ - public void sendRegionOpen(HServerInfo server, List regions) + public void sendRegionOpen(ServerName server, List regions) throws IOException { HRegionInterface hri = getServerConnection(server); if (hri == null) { - LOG.warn("Attempting to send OPEN RPC to server " + server.getServerName() - + " failed because no RPC connection found to this server"); + LOG.warn("Attempting to send OPEN RPC to server " + server.toString() + + " failed because no RPC connection found to this server"); return; } hri.openRegions(regions); @@ -576,13 +377,13 @@ public class ServerManager { * @return true if server acknowledged close, false if not * @throws IOException */ - public boolean sendRegionClose(HServerInfo server, HRegionInfo region) + public boolean sendRegionClose(ServerName server, HRegionInfo region) throws IOException { if (server == null) throw new NullPointerException("Passed server is null"); HRegionInterface hri = getServerConnection(server); if (hri == null) { throw new IOException("Attempting to send CLOSE RPC to server " + - server.getServerName() + " for region " + + server.toString() + " for region " + region.getRegionNameAsString() + " failed because no RPC connection found to this server"); } @@ -590,31 +391,32 @@ public class ServerManager { } /** - * @param info + * @param sn * @return * @throws IOException * @throws RetriesExhaustedException wrapping a ConnectException if failed * putting up proxy. 
*/ - private HRegionInterface getServerConnection(HServerInfo info) + private HRegionInterface getServerConnection(final ServerName sn) throws IOException { HConnection connection = HConnectionManager.getConnection(this.master.getConfiguration()); - HRegionInterface hri = serverConnections.get(info.getServerName()); + HRegionInterface hri = this.serverConnections.get(sn.toString()); if (hri == null) { - LOG.debug("New connection to " + info.getServerName()); - hri = connection.getHRegionConnection(info.getServerAddress(), false); - this.serverConnections.put(info.getServerName(), hri); + LOG.debug("New connection to " + sn.toString()); + // TODO: Remove need for an HServerAddress + HServerAddress hsa = new HServerAddress(this.onlineServers.get(sn)); + hri = connection.getHRegionConnection(hsa, false); + this.serverConnections.put(sn, hri); } return hri; } /** * Waits for the regionservers to report in. - * @return Count of regions out on cluster * @throws InterruptedException */ - public int waitForRegionServers() + public void waitForRegionServers() throws InterruptedException { long interval = this.master.getConfiguration(). getLong("hbase.master.wait.on.regionservers.interval", 3000); @@ -632,31 +434,17 @@ public class ServerManager { } oldcount = count; } - // Count how many regions deployed out on cluster. If fresh start, it'll - // be none but if not a fresh start, we'll have registered servers when - // they came in on the {@link #regionServerReport(HServerInfo)} as opposed to - // {@link #regionServerStartup(HServerInfo)} and it'll be carrying an - // actual server load. 
- int regionCount = 0; - for (Map.Entry e: this.onlineServers.entrySet()) { - HServerLoad load = e.getValue().getLoad(); - if (load != null) regionCount += load.getLoad(); - } - LOG.info("Exiting wait on regionserver(s) to checkin; count=" + count + - ", stopped=" + this.master.isStopped() + - ", count of regions out on cluster=" + regionCount); - return regionCount; } /** * @return A copy of the internal list of online servers. */ - public List getOnlineServersList() { + public List getOnlineServersList() { // TODO: optimize the load balancer call so we don't need to make a new list - return new ArrayList(onlineServers.values()); + return new ArrayList(this.onlineServers.keySet()); } - public boolean isServerOnline(String serverName) { + public boolean isServerOnline(ServerName serverName) { return onlineServers.containsKey(serverName); } @@ -673,6 +461,5 @@ public class ServerManager { * Stop the ServerManager. Currently does nothing. */ public void stop() { - } -} +} \ No newline at end of file diff --git a/src/main/java/org/apache/hadoop/hbase/master/handler/MetaServerShutdownHandler.java b/src/main/java/org/apache/hadoop/hbase/master/handler/MetaServerShutdownHandler.java index eb01a6a..e5385b7 100644 --- a/src/main/java/org/apache/hadoop/hbase/master/handler/MetaServerShutdownHandler.java +++ b/src/main/java/org/apache/hadoop/hbase/master/handler/MetaServerShutdownHandler.java @@ -19,8 +19,8 @@ */ package org.apache.hadoop.hbase.master.handler; -import org.apache.hadoop.hbase.HServerInfo; import org.apache.hadoop.hbase.Server; +import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.master.DeadServer; import org.apache.hadoop.hbase.master.MasterServices; @@ -34,9 +34,9 @@ public class MetaServerShutdownHandler extends ServerShutdownHandler { public MetaServerShutdownHandler(final Server server, final MasterServices services, - final DeadServer deadServers, final HServerInfo hsi, + final DeadServer deadServers, final ServerName serverName, 
final boolean carryingRoot, final boolean carryingMeta) { - super(server, services, deadServers, hsi, EventType.M_META_SERVER_SHUTDOWN); + super(server, services, deadServers, serverName, EventType.M_META_SERVER_SHUTDOWN); this.carryingRoot = carryingRoot; this.carryingMeta = carryingMeta; } @@ -50,4 +50,4 @@ public class MetaServerShutdownHandler extends ServerShutdownHandler { boolean isCarryingMeta() { return this.carryingMeta; } -} +} \ No newline at end of file diff --git a/src/main/java/org/apache/hadoop/hbase/master/handler/OpenedRegionHandler.java b/src/main/java/org/apache/hadoop/hbase/master/handler/OpenedRegionHandler.java index 0f0ae65..ebf1e69 100644 --- a/src/main/java/org/apache/hadoop/hbase/master/handler/OpenedRegionHandler.java +++ b/src/main/java/org/apache/hadoop/hbase/master/handler/OpenedRegionHandler.java @@ -22,8 +22,8 @@ package org.apache.hadoop.hbase.master.handler; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.HServerInfo; import org.apache.hadoop.hbase.Server; +import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.executor.EventHandler; import org.apache.hadoop.hbase.master.AssignmentManager; import org.apache.hadoop.hbase.zookeeper.ZKAssign; @@ -36,7 +36,7 @@ public class OpenedRegionHandler extends EventHandler implements TotesHRegionInf private static final Log LOG = LogFactory.getLog(OpenedRegionHandler.class); private final AssignmentManager assignmentManager; private final HRegionInfo regionInfo; - private final HServerInfo serverInfo; + private final ServerName sn; private final OpenedPriority priority; private enum OpenedPriority { @@ -55,11 +55,11 @@ public class OpenedRegionHandler extends EventHandler implements TotesHRegionInf public OpenedRegionHandler(Server server, AssignmentManager assignmentManager, HRegionInfo regionInfo, - HServerInfo serverInfo) { + ServerName sn) { super(server, 
EventType.RS_ZK_REGION_OPENED); this.assignmentManager = assignmentManager; this.regionInfo = regionInfo; - this.serverInfo = serverInfo; + this.sn = sn; if(regionInfo.isRootRegion()) { priority = OpenedPriority.ROOT; } else if(regionInfo.isMetaRegion()) { @@ -91,7 +91,7 @@ public class OpenedRegionHandler extends EventHandler implements TotesHRegionInf server.abort("Error deleting OPENED node in ZK for transition ZK node (" + regionInfo.getEncodedName() + ")", e); } - this.assignmentManager.regionOnline(regionInfo, serverInfo); + this.assignmentManager.regionOnline(regionInfo, this.sn); if (this.assignmentManager.getZKTable().isDisablingOrDisabledTable( regionInfo.getTableDesc().getNameAsString())) { LOG.debug("Opened region " + regionInfo.getRegionNameAsString() + " but " @@ -99,7 +99,7 @@ public class OpenedRegionHandler extends EventHandler implements TotesHRegionInf assignmentManager.unassign(regionInfo); } else { LOG.debug("Opened region " + regionInfo.getRegionNameAsString() + - " on " + serverInfo.getServerName()); + " on " + this.sn.toString()); } } } \ No newline at end of file diff --git a/src/main/java/org/apache/hadoop/hbase/master/handler/ServerShutdownHandler.java b/src/main/java/org/apache/hadoop/hbase/master/handler/ServerShutdownHandler.java index 852efe7..1fb6d29 100644 --- a/src/main/java/org/apache/hadoop/hbase/master/handler/ServerShutdownHandler.java +++ b/src/main/java/org/apache/hadoop/hbase/master/handler/ServerShutdownHandler.java @@ -28,8 +28,8 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.HServerInfo; import org.apache.hadoop.hbase.Server; +import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.catalog.CatalogTracker; import org.apache.hadoop.hbase.catalog.MetaEditor; import org.apache.hadoop.hbase.catalog.MetaReader; @@ -47,29 +47,29 @@ import 
org.apache.zookeeper.KeeperException; /** * Process server shutdown. * Server-to-handle must be already in the deadservers lists. See - * {@link ServerManager#expireServer(HServerInfo)}. + * {@link ServerManager#expireServer(ServerName)} */ public class ServerShutdownHandler extends EventHandler { private static final Log LOG = LogFactory.getLog(ServerShutdownHandler.class); - private final HServerInfo hsi; + private final ServerName serverName; private final Server server; private final MasterServices services; private final DeadServer deadServers; public ServerShutdownHandler(final Server server, final MasterServices services, - final DeadServer deadServers, final HServerInfo hsi) { - this(server, services, deadServers, hsi, EventType.M_SERVER_SHUTDOWN); + final DeadServer deadServers, final ServerName serverName) { + this(server, services, deadServers, serverName, EventType.M_SERVER_SHUTDOWN); } ServerShutdownHandler(final Server server, final MasterServices services, - final DeadServer deadServers, final HServerInfo hsi, EventType type) { + final DeadServer deadServers, final ServerName serverName, EventType type) { super(server, type); - this.hsi = hsi; + this.serverName = serverName; this.server = server; this.services = services; this.deadServers = deadServers; - if (!this.deadServers.contains(hsi.getServerName())) { - LOG.warn(hsi.getServerName() + " is NOT in deadservers; it should be!"); + if (!this.deadServers.contains(this.serverName)) { + LOG.warn(this.serverName + " is NOT in deadservers; it should be!"); } } @@ -89,7 +89,7 @@ public class ServerShutdownHandler extends EventHandler { @Override public void process() throws IOException { - final String serverName = this.hsi.getServerName(); + final ServerName serverName = this.serverName; LOG.info("Splitting logs for " + serverName); this.services.getMasterFileSystem().splitLog(serverName); @@ -99,7 +99,7 @@ public class ServerShutdownHandler extends EventHandler { // OFFLINE? 
-- and then others after like CLOSING that depend on log // splitting. List regionsInTransition = - this.services.getAssignmentManager().processServerShutdown(this.hsi); + this.services.getAssignmentManager().processServerShutdown(this.serverName); // Assign root and meta if we were carrying them. if (isCarryingRoot()) { // -ROOT- @@ -122,7 +122,7 @@ public class ServerShutdownHandler extends EventHandler { try { this.server.getCatalogTracker().waitForMeta(); hris = MetaReader.getServerUserRegions(this.server.getCatalogTracker(), - this.hsi); + this.serverName); break; } catch (InterruptedException e) { Thread.currentThread().interrupt(); @@ -154,7 +154,7 @@ public class ServerShutdownHandler extends EventHandler { this.services.getAssignmentManager().assign(e.getKey(), true); } } - this.deadServers.finish(serverName); + this.deadServers.finish(serverName.toString()); LOG.info("Finished processing of shutdown of " + serverName); } diff --git a/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java index 6f80fb0..13be5de 100644 --- a/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java +++ b/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java @@ -58,12 +58,12 @@ import org.apache.hadoop.hbase.DroppedSnapshotException; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HConstants.OperationStatusCode; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.NotServingRegionException; import org.apache.hadoop.hbase.UnknownScannerException; -import org.apache.hadoop.hbase.HConstants.OperationStatusCode; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.Increment; @@ 
-93,7 +93,6 @@ import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.util.Writables; import org.apache.hadoop.io.Writable; -import org.apache.hadoop.util.Progressable; import org.apache.hadoop.util.StringUtils; import com.google.common.collect.ClassToInstanceMap; @@ -229,7 +228,7 @@ public class HRegion implements HeapSize { // , Writable{ final long memstoreFlushSize; private volatile long lastFlushTime; final RegionServerServices rsServices; - private List> recentFlushes = new ArrayList>(); + private List> recentFlushes = new ArrayList>(); private final long blockingMemStoreSize; final long threadWakeFrequency; // Used to guard closes diff --git a/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java index b3f47d4..2f6ed86 100644 --- a/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +++ b/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java @@ -20,11 +20,10 @@ package org.apache.hadoop.hbase.regionserver; import java.io.IOException; +import java.io.StringWriter; import java.lang.Thread.UncaughtExceptionHandler; import java.lang.annotation.Retention; import java.lang.annotation.RetentionPolicy; -import java.lang.management.ManagementFactory; -import java.lang.management.MemoryUsage; import java.lang.reflect.Constructor; import java.lang.reflect.Method; import java.net.BindException; @@ -44,12 +43,11 @@ import java.util.Set; import java.util.SortedMap; import java.util.TreeMap; import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.LinkedBlockingQueue; -import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.locks.ReentrantReadWriteLock; +import org.apache.commons.lang.NotImplementedException; import org.apache.commons.logging.Log; import 
org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; @@ -61,20 +59,18 @@ import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HConstants.OperationStatusCode; -import org.apache.hadoop.hbase.HMsg; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HServerAddress; import org.apache.hadoop.hbase.HServerInfo; -import org.apache.hadoop.hbase.HServerLoad; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.MasterAddressTracker; import org.apache.hadoop.hbase.NotServingRegionException; import org.apache.hadoop.hbase.RemoteExceptionHandler; import org.apache.hadoop.hbase.Server; +import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.Stoppable; import org.apache.hadoop.hbase.UnknownRowLockException; import org.apache.hadoop.hbase.UnknownScannerException; -import org.apache.hadoop.hbase.YouAreDeadException; import org.apache.hadoop.hbase.catalog.CatalogTracker; import org.apache.hadoop.hbase.catalog.MetaEditor; import org.apache.hadoop.hbase.catalog.RootLocationEditor; @@ -118,6 +114,7 @@ import org.apache.hadoop.hbase.regionserver.metrics.RegionServerMetrics; import org.apache.hadoop.hbase.regionserver.wal.HLog; import org.apache.hadoop.hbase.regionserver.wal.WALObserver; import org.apache.hadoop.hbase.replication.regionserver.Replication; +import org.apache.hadoop.hbase.util.Addressing; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.CompressionTest; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; @@ -135,6 +132,7 @@ import org.apache.hadoop.io.Writable; import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.net.DNS; import org.apache.zookeeper.KeeperException; +import org.codehaus.jackson.map.ObjectMapper; import com.google.common.base.Function; import com.google.common.collect.Lists; @@ -165,7 
+163,6 @@ public class HRegionServer implements HRegionInterface, HBaseRPCErrorHandler, // If false, the file system has become unavailable protected volatile boolean fsOk; - protected HServerInfo serverInfo; protected final Configuration conf; private final HConnection connection; @@ -182,7 +179,6 @@ public class HRegionServer implements HRegionInterface, HBaseRPCErrorHandler, new HashMap(); protected final ReentrantReadWriteLock lock = new ReentrantReadWriteLock(); - private final LinkedBlockingQueue outboundMsgs = new LinkedBlockingQueue(); final int numRetries; protected final int threadWakeFrequency; @@ -241,7 +237,8 @@ public class HRegionServer implements HRegionInterface, HBaseRPCErrorHandler, // flag set after we're done setting up server threads (used for testing) protected volatile boolean isOnline; - final Map scanners = new ConcurrentHashMap(); + final Map scanners = + new ConcurrentHashMap(); // zookeeper connection and watcher private ZooKeeperWatcher zooKeeper; @@ -260,10 +257,6 @@ public class HRegionServer implements HRegionInterface, HBaseRPCErrorHandler, private final int rpcTimeout; - // The main region server thread. - @SuppressWarnings("unused") - private Thread regionServerThread; - // Instance of the hbase executor service. private ExecutorService service; @@ -271,78 +264,96 @@ public class HRegionServer implements HRegionInterface, HBaseRPCErrorHandler, private Replication replicationHandler; /** + * The server name the Master sees us as. Its made from the hostname the + * master passes us, port, and server startcode. Gets set after registration + * against Master. The hostname can differ from the hostname in {@link #isa} + * but usually doesn't if both servers resolve . + */ + private ServerName serverNameFromMasterPOV; + + // Port we put up the webui on. + private int webuiport = -1; + + /** + * This servers startcode. 
+ */ + private final long startcode; + + /** * Starts a HRegionServer at the default location * * @param conf * @throws IOException * @throws InterruptedException */ - public HRegionServer(Configuration conf) throws IOException, InterruptedException { + public HRegionServer(Configuration conf) + throws IOException, InterruptedException { this.fsOk = true; this.conf = conf; this.connection = HConnectionManager.getConnection(conf); this.isOnline = false; - - // check to see if the codec list is available: - String [] codecs = conf.getStrings("hbase.regionserver.codecs", - (String[])null); - if (codecs != null) { - for (String codec : codecs) { - if (!CompressionTest.testCompression(codec)) { - throw new IOException("Compression codec " + codec + - " not supported, aborting RS construction"); - } - } - } + checkCodecs(this.conf); // Config'ed params this.numRetries = conf.getInt("hbase.client.retries.number", 10); this.threadWakeFrequency = conf.getInt(HConstants.THREAD_WAKE_FREQUENCY, - 10 * 1000); + 10 * 1000); this.msgInterval = conf.getInt("hbase.regionserver.msginterval", 3 * 1000); - sleeper = new Sleeper(this.msgInterval, this); + this.sleeper = new Sleeper(this.msgInterval, this); this.maxScannerResultSize = conf.getLong( - HConstants.HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE_KEY, - HConstants.DEFAULT_HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE); + HConstants.HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE_KEY, + HConstants.DEFAULT_HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE); this.numRegionsToReport = conf.getInt( - "hbase.regionserver.numregionstoreport", 10); + "hbase.regionserver.numregionstoreport", 10); this.rpcTimeout = conf.getInt( - HConstants.HBASE_RPC_TIMEOUT_KEY, - HConstants.DEFAULT_HBASE_RPC_TIMEOUT); + HConstants.HBASE_RPC_TIMEOUT_KEY, + HConstants.DEFAULT_HBASE_RPC_TIMEOUT); this.abortRequested = false; this.stopped = false; - // Server to handle client requests - String machineName = DNS.getDefaultHost(conf.get( - "hbase.regionserver.dns.interface", "default"), conf.get( - 
"hbase.regionserver.dns.nameserver", "default")); - String addressStr = machineName + ":" + - conf.get(HConstants.REGIONSERVER_PORT, - Integer.toString(HConstants.DEFAULT_REGIONSERVER_PORT)); - HServerAddress address = new HServerAddress(addressStr); + // Server to handle client requests. + String hostname = DNS.getDefaultHost( + conf.get("hbase.regionserver.dns.interface", "default"), + conf.get("hbase.regionserver.dns.nameserver", "default")); + int port = conf.getInt(HConstants.REGIONSERVER_PORT, + HConstants.DEFAULT_REGIONSERVER_PORT); + // Creation of a HSA will force a resolve. + InetSocketAddress isa = new InetSocketAddress(hostname, port); + if (isa.getAddress() == null) { + throw new IllegalArgumentException("Failed resolve of " + isa); + } this.server = HBaseRPC.getServer(this, - new Class[]{HRegionInterface.class, HBaseRPCErrorHandler.class, + new Class[]{HRegionInterface.class, HBaseRPCErrorHandler.class, OnlineRegions.class}, - address.getBindAddress(), - address.getPort(), conf.getInt("hbase.regionserver.handler.count", 10), + isa.getHostName(), // BindAddress is IP we got for this server. + isa.getPort(), + conf.getInt("hbase.regionserver.handler.count", 10), conf.getInt("hbase.regionserver.metahandler.count", 10), false, conf, QOS_THRESHOLD); this.server.setErrorHandler(this); this.server.setQosFunction(new QosFunction()); + this.startcode = System.currentTimeMillis(); + } - // HServerInfo can be amended by master. See below in reportForDuty. - this.serverInfo = new HServerInfo(new HServerAddress(new InetSocketAddress( - address.getBindAddress(), this.server.getListenerAddress().getPort())), - System.currentTimeMillis(), this.conf.getInt( - "hbase.regionserver.info.port", 60030), machineName); - if (this.serverInfo.getServerAddress() == null) { - throw new NullPointerException("Server address cannot be null; " - + "hbase-958 debugging"); + /** + * Run test on configured codecs to make sure supporting libs are in place. 
+ * @param c + * @throws IOException + */ + private static void checkCodecs(final Configuration c) throws IOException { + // check to see if the codec list is available: + String [] codecs = c.getStrings("hbase.regionserver.codecs", (String[])null); + if (codecs == null) return; + for (String codec : codecs) { + if (!CompressionTest.testCompression(codec)) { + throw new IOException("Compression codec " + codec + + " not supported, aborting RS construction"); + } } } @@ -355,6 +366,10 @@ public class HRegionServer implements HRegionInterface, HBaseRPCErrorHandler, int priority() default 0; } + /** + * Utility used ensuring higher quality of service for priority rpcs; e.g. + * rpcs to .META. and -ROOT-, etc. + */ class QosFunction implements Function { private final Map annotatedQos; @@ -446,14 +461,13 @@ public class HRegionServer implements HRegionInterface, HBaseRPCErrorHandler, } /** - * Creates all of the state that needs to be reconstructed in case we are - * doing a restart. This is shared between the constructor and restart(). Both - * call it. + * All initialization needed before we go register with Master. * * @throws IOException * @throws InterruptedException */ - private void initialize() throws IOException, InterruptedException { + private void preRegistrationInitialization() + throws IOException, InterruptedException { try { initializeZooKeeper(); initializeThreads(); @@ -479,8 +493,8 @@ public class HRegionServer implements HRegionInterface, HBaseRPCErrorHandler, */ private void initializeZooKeeper() throws IOException, InterruptedException { // Open connection to zookeeper and set primary watcher - zooKeeper = new ZooKeeperWatcher(conf, REGIONSERVER + ":" + - serverInfo.getServerAddress().getPort(), this); + this.zooKeeper = new ZooKeeperWatcher(conf, REGIONSERVER + ":" + + this.server.getListenerAddress().getPort(), this); // Create the master address manager, register with zk, and start it. Then // block until a master is available. 
No point in starting up if no master @@ -525,7 +539,6 @@ public class HRegionServer implements HRegionInterface, HBaseRPCErrorHandler, } private void initializeThreads() throws IOException { - // Cache flushing thread. this.cacheFlusher = new MemStoreFlusher(conf, this); @@ -534,10 +547,10 @@ public class HRegionServer implements HRegionInterface, HBaseRPCErrorHandler, // Background thread to check for major compactions; needed if region // has not gotten updates in a while. Make it run at a lesser frequency. - int multiplier = this.conf.getInt(HConstants.THREAD_WAKE_FREQUENCY - + ".multiplier", 1000); + int multiplier = this.conf.getInt(HConstants.THREAD_WAKE_FREQUENCY + + ".multiplier", 1000); this.majorCompactionChecker = new MajorCompactionChecker(this, - this.threadWakeFrequency * multiplier, this); + this.threadWakeFrequency * multiplier, this); this.leases = new Leases((int) conf.getLong( HConstants.HBASE_REGIONSERVER_LEASE_PERIOD_KEY, @@ -546,28 +559,28 @@ public class HRegionServer implements HRegionInterface, HBaseRPCErrorHandler, } /** - * The HRegionServer sticks in this loop until closed. It repeatedly checks in - * with the HMaster, sending heartbeats & reports, and receiving HRegion - * load/unload instructions. + * The HRegionServer sticks in this loop until closed. */ public void run() { - try { - // Initialize threads and wait for a master - initialize(); + // Do pre-registration initializations; zookeeper, lease threads, etc. + preRegistrationInitialization(); } catch (Exception e) { abort("Fatal exception during initialization", e); } - this.regionServerThread = Thread.currentThread(); try { + // Try and register with the Master; tell it we are here. while (!this.stopped) { if (tryReportForDuty()) break; + LOG.warn("No response on reportForDuty. Sleeping and then retrying."); + this.sleeper.sleep(); } + + // We registered with the Master. Go into run mode. long lastMsg = 0; - List outboundMessages = new ArrayList(); // The main run loop. 
- for (int tries = 0; !this.stopped && isHealthy();) { + while (!this.stopped && isHealthy()) { if (!isClusterUp()) { if (isOnlineRegionsEmpty()) { stop("Exiting; cluster shutdown set and not carrying any regions"); @@ -579,50 +592,18 @@ public class HRegionServer implements HRegionInterface, HBaseRPCErrorHandler, } } long now = System.currentTimeMillis(); - // Drop into the send loop if msgInterval has elapsed or if something - // to send. If we fail talking to the master, then we'll sleep below - // on poll of the outboundMsgs blockingqueue. - if ((now - lastMsg) >= msgInterval || !outboundMessages.isEmpty()) { - try { - doMetrics(); - tryRegionServerReport(outboundMessages); - lastMsg = System.currentTimeMillis(); - // Reset tries count if we had a successful transaction. - tries = 0; - if (this.stopped) continue; - } catch (Exception e) { // FindBugs REC_CATCH_EXCEPTION - // Two special exceptions could be printed out here, - // PleaseHoldException and YouAreDeadException - if (e instanceof IOException) { - e = RemoteExceptionHandler.checkIOException((IOException) e); - } - if (e instanceof YouAreDeadException) { - // This will be caught and handled as a fatal error below - throw e; - } - tries++; - if (tries > 0 && (tries % this.numRetries) == 0) { - // Check filesystem every so often. - checkFileSystem(); - } - if (this.stopped) { - continue; - } - LOG.warn("Attempt=" + tries, e); - // No point retrying immediately; this is probably connection to - // master issue. Doing below will cause us to sleep. 
- lastMsg = System.currentTimeMillis(); - } + if ((now - lastMsg) >= msgInterval) { + doMetrics(); + lastMsg = System.currentTimeMillis(); } - now = System.currentTimeMillis(); - HMsg msg = this.outboundMsgs.poll((msgInterval - (now - lastMsg)), TimeUnit.MILLISECONDS); - if (msg != null) outboundMessages.add(msg); + this.sleeper.sleep(); } // for } catch (Throwable t) { if (!checkOOME(t)) { abort("Unhandled exception: " + t.getMessage(), t); } } + // Run shutdown. this.leases.closeAfterLeasesExpire(); this.server.stop(); if (this.infoServer != null) { @@ -646,6 +627,7 @@ public class HRegionServer implements HRegionInterface, HBaseRPCErrorHandler, if (this.hlogRoller != null) this.hlogRoller.interruptIfNecessary(); if (this.majorCompactionChecker != null) this.majorCompactionChecker.interrupt(); + String address = this.server.getListenerAddress().toString(); if (this.killed) { // Just skip out w/o closing regions. } else if (abortRequested) { @@ -653,12 +635,12 @@ public class HRegionServer implements HRegionInterface, HBaseRPCErrorHandler, closeAllRegions(abortRequested); // Don't leave any open file handles closeWAL(false); } - LOG.info("aborting server at: " + this.serverInfo.getServerName()); + LOG.info("aborting server at: " + address); } else { closeAllRegions(abortRequested); closeWAL(true); closeAllScanners(); - LOG.info("stopping server at: " + this.serverInfo.getServerName()); + LOG.info("stopping server at: " + address); } // Interrupt catalog tracker here in case any regions being opened out in // handlers are stuck waiting on meta or root. 
@@ -714,59 +696,6 @@ public class HRegionServer implements HRegionInterface, HBaseRPCErrorHandler, } } - List tryRegionServerReport(final List outboundMessages) - throws IOException { - this.serverInfo.setLoad(buildServerLoad()); - this.requestCount.set(0); - addOutboundMsgs(outboundMessages); - HMsg [] msgs = null; - while (!this.stopped) { - try { - msgs = this.hbaseMaster.regionServerReport(this.serverInfo, - outboundMessages.toArray(HMsg.EMPTY_HMSG_ARRAY), - getMostLoadedRegions()); - break; - } catch (IOException ioe) { - if (ioe instanceof RemoteException) { - ioe = ((RemoteException)ioe).unwrapRemoteException(); - } - if (ioe instanceof YouAreDeadException) { - // This will be caught and handled as a fatal error in run() - throw ioe; - } - // Couldn't connect to the master, get location from zk and reconnect - // Method blocks until new master is found or we are stopped - getMaster(); - } - } - updateOutboundMsgs(outboundMessages); - outboundMessages.clear(); - - for (int i = 0; !this.stopped && msgs != null && i < msgs.length; i++) { - LOG.info(msgs[i].toString()); - // Intercept stop regionserver messages - if (msgs[i].getType().equals(HMsg.Type.STOP_REGIONSERVER)) { - stop("Received " + msgs[i]); - continue; - } - LOG.warn("NOT PROCESSING " + msgs[i] + " -- WHY IS MASTER SENDING IT TO US?"); - } - return outboundMessages; - } - - private HServerLoad buildServerLoad() { - MemoryUsage memory = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage(); - HServerLoad hsl = new HServerLoad(requestCount.get(), - (int)(memory.getUsed() / 1024 / 1024), - (int) (memory.getMax() / 1024 / 1024)); - synchronized (this.onlineRegions) { - for (HRegion r : this.onlineRegions.values()) { - hsl.addRegionInfo(createRegionLoad(r)); - } - } - return hsl; - } - private void closeWAL(final boolean delete) { try { if (this.hlog != null) { @@ -794,61 +723,23 @@ public class HRegionServer implements HRegionInterface, HBaseRPCErrorHandler, } /* - * Add to the passed msgs messages 
to pass to the master. - * - * @param msgs Current outboundMsgs array; we'll add messages to this List. - */ - private void addOutboundMsgs(final List msgs) { - if (msgs.isEmpty()) { - this.outboundMsgs.drainTo(msgs); - return; - } - OUTER: for (HMsg m : this.outboundMsgs) { - for (HMsg mm : msgs) { - // Be careful don't add duplicates. - if (mm.equals(m)) { - continue OUTER; - } - } - msgs.add(m); - } - } - - /* - * Remove from this.outboundMsgs those messsages we sent the master. - * - * @param msgs Messages we sent the master. - */ - private void updateOutboundMsgs(final List msgs) { - if (msgs.isEmpty()) { - return; - } - for (HMsg m : this.outboundMsgs) { - for (HMsg mm : msgs) { - if (mm.equals(m)) { - this.outboundMsgs.remove(m); - break; - } - } - } - } - - /* * Run init. Sets up hlog and starts up all server threads. * * @param c Extra configuration. */ - protected void handleReportForDutyResponse(final MapWritable c) throws IOException { + protected void handleReportForDutyResponse(final MapWritable c) + throws IOException { try { - for (Map.Entry e : c.entrySet()) { - + for (Map.Entry e :c.entrySet()) { String key = e.getKey().toString(); - // Use the address the master passed us - if (key.equals("hbase.regionserver.address")) { - HServerAddress hsa = (HServerAddress) e.getValue(); - LOG.info("Master passed us address to use. Was=" - + this.serverInfo.getServerAddress() + ", Now=" + hsa.toString()); - this.serverInfo.setServerAddress(hsa); + // The hostname the master sees us as. + if (key.equals(HConstants.KEY_FOR_HOSTNAME_SEEN_BY_MASTER)) { + String hostnameFromMasterPOV = e.getValue().toString(); + LOG.info("Master passed us hostname to use. 
Was=" + + this.server.getListenerAddress().getHostName() + + ", Now=" + hostnameFromMasterPOV); + this.serverNameFromMasterPOV = new ServerName(hostnameFromMasterPOV, + this.server.getListenerAddress().getPort(), this.startcode); continue; } String value = e.getValue().toString(); @@ -857,13 +748,12 @@ public class HRegionServer implements HRegionInterface, HBaseRPCErrorHandler, } this.conf.set(key, value); } - + // hack! Maps DFSClient => RegionServer for logs. HDFS made this // config param for task trackers, but we can piggyback off of it. if (this.conf.get("mapred.task.id") == null) { - this.conf.set("mapred.task.id", - "hb_rs_" + this.serverInfo.getServerName() + "_" + - System.currentTimeMillis()); + this.conf.set("mapred.task.id", "hb_rs_" + + this.serverNameFromMasterPOV.toString() + "_" + System.currentTimeMillis()); } // Master sent us hbase.rootdir to use. Should be fully qualified @@ -879,7 +769,10 @@ public class HRegionServer implements HRegionInterface, HBaseRPCErrorHandler, // Init in here rather than in constructor after thread name has been set this.metrics = new RegionServerMetrics(); startServiceThreads(); - LOG.info("Serving as " + this.serverInfo.getServerName() + + String znodePath = getRSZNodePath(); + ZKUtil.createEphemeralNodeAndWatch(this.zooKeeper, znodePath, + RegionServerZNodeContent.getRegionServerZNodeBytes(this.webuiport)); + LOG.info("Serving as " + this.serverNameFromMasterPOV + ", RPC listening on " + this.server.getListenerAddress() + ", sessionid=0x" + Long.toHexString(this.zooKeeper.getZooKeeper().getSessionId())); @@ -892,43 +785,9 @@ public class HRegionServer implements HRegionInterface, HBaseRPCErrorHandler, } } - /* - * @param r Region to get RegionLoad for. - * - * @return RegionLoad instance. 
- * - * @throws IOException - */ - private HServerLoad.RegionLoad createRegionLoad(final HRegion r) { - byte[] name = r.getRegionName(); - int stores = 0; - int storefiles = 0; - int storefileSizeMB = 0; - int memstoreSizeMB = (int) (r.memstoreSize.get() / 1024 / 1024); - int storefileIndexSizeMB = 0; - synchronized (r.stores) { - stores += r.stores.size(); - for (Store store : r.stores.values()) { - storefiles += store.getStorefilesCount(); - storefileSizeMB += (int) (store.getStorefilesSize() / 1024 / 1024); - storefileIndexSizeMB += (int) (store.getStorefilesIndexSize() / 1024 / 1024); - } - } - return new HServerLoad.RegionLoad(name, stores, storefiles, - storefileSizeMB, memstoreSizeMB, storefileIndexSizeMB); - } - - /** - * @param encodedRegionName - * @return An instance of RegionLoad. - * @throws IOException - */ - public HServerLoad.RegionLoad createRegionLoad(final String encodedRegionName) { - HRegion r = null; - synchronized (this.onlineRegions) { - r = this.onlineRegions.get(encodedRegionName); - } - return createRegionLoad(r); + private String getRSZNodePath() { + return ZKUtil.joinZNode(this.zooKeeper.rsZNode, + this.serverNameFromMasterPOV.toString()); } /* @@ -1079,14 +938,12 @@ public class HRegionServer implements HRegionInterface, HBaseRPCErrorHandler, */ private HLog setupWALAndReplication() throws IOException { final Path oldLogDir = new Path(rootDir, HConstants.HREGION_OLDLOGDIR_NAME); - Path logdir = new Path(rootDir, HLog.getHLogDirectoryName(this.serverInfo)); - if (LOG.isDebugEnabled()) { - LOG.debug("logdir=" + logdir); - } + Path logdir = new Path(rootDir, + HLog.getHLogDirectoryName(this.serverNameFromMasterPOV.toString())); + if (LOG.isDebugEnabled()) LOG.debug("logdir=" + logdir); if (this.fs.exists(logdir)) { - throw new RegionServerRunningException("Region server already " - + "running at " + this.serverInfo.getServerName() - + " because logdir " + logdir.toString() + " exists"); + throw new RegionServerRunningException("Region 
server has already " + + "created directory at " + this.serverNameFromMasterPOV.toString()); } // Instantiate replication manager if replication enabled. Pass it the @@ -1109,7 +966,7 @@ public class HRegionServer implements HRegionInterface, HBaseRPCErrorHandler, */ protected HLog instantiateHLog(Path logdir, Path oldLogDir) throws IOException { return new HLog(this.fs, logdir, oldLogDir, this.conf, - getWALActionListeners(), this.serverInfo.getServerAddress().toString()); + getWALActionListeners(), this.serverNameFromMasterPOV.toString()); } /** @@ -1225,7 +1082,7 @@ public class HRegionServer implements HRegionInterface, HBaseRPCErrorHandler, }; // Start executor services - this.service = new ExecutorService(getServerName()); + this.service = new ExecutorService(getServerName().toString()); this.service.startExecutorService(ExecutorType.RS_OPEN_REGION, conf.getInt("hbase.regionserver.executor.openregion.threads", 3)); this.service.startExecutorService(ExecutorType.RS_OPEN_ROOT, @@ -1241,46 +1098,20 @@ public class HRegionServer implements HRegionInterface, HBaseRPCErrorHandler, Threads.setDaemonThreadRunning(this.hlogRoller, n + ".logRoller", handler); Threads.setDaemonThreadRunning(this.cacheFlusher, n + ".cacheFlusher", - handler); + handler); Threads.setDaemonThreadRunning(this.compactSplitThread, n + ".compactor", - handler); - Threads.setDaemonThreadRunning(this.majorCompactionChecker, n - + ".majorCompactionChecker", handler); + handler); + Threads.setDaemonThreadRunning(this.majorCompactionChecker, n + + ".majorCompactionChecker", handler); // Leases is not a Thread. Internally it runs a daemon thread. If it gets // an unhandled exception, it will just exit. this.leases.setName(n + ".leaseChecker"); this.leases.start(); - // Put up info server. 
- int port = this.conf.getInt("hbase.regionserver.info.port", 60030); - // -1 is for disabling info server - if (port >= 0) { - String addr = this.conf.get("hbase.regionserver.info.bindAddress", - "0.0.0.0"); - // check if auto port bind enabled - boolean auto = this.conf.getBoolean("hbase.regionserver.info.port.auto", - false); - while (true) { - try { - this.infoServer = new InfoServer("regionserver", addr, port, false); - this.infoServer.setAttribute("regionserver", this); - this.infoServer.start(); - break; - } catch (BindException e) { - if (!auto) { - // auto bind disabled throw BindException - throw e; - } - // auto bind enabled, try to use another port - LOG.info("Failed binding http info server to port: " + port); - port++; - // update HRS server info port. - this.serverInfo = new HServerInfo(this.serverInfo.getServerAddress(), - this.serverInfo.getStartCode(), port, - this.serverInfo.getHostname()); - } - } - } + + // Put up the webui. Webui may come up on port other than configured if + // that port is occupied. Adjust serverInfo if this is the case. + this.webuiport = putUpWebUI(); if (this.replicationHandler != null) { this.replicationHandler.startReplicationServices(); @@ -1291,6 +1122,37 @@ this.server.start(); } + /** + * Puts up the webui. + * @return Returns final port -- maybe different from what we started with. 
+ * @throws IOException + */ + private int putUpWebUI() throws IOException { + int port = this.conf.getInt("hbase.regionserver.info.port", 60030); + // -1 is for disabling info server + if (port < 0) return port; + String addr = this.conf.get("hbase.regionserver.info.bindAddress", "0.0.0.0"); + // check if auto port bind enabled + boolean auto = this.conf.getBoolean("hbase.regionserver.info.port.auto", false); + while (true) { + try { + this.infoServer = new InfoServer("regionserver", addr, port, false); + this.infoServer.setAttribute("regionserver", this); + this.infoServer.start(); + break; + } catch (BindException e) { + if (!auto) { + // auto bind disabled throw BindException + throw e; + } + // auto bind enabled, try to use another port + LOG.info("Failed binding http info server to port: " + port); + port++; + } + } + return port; + } + /* * Verify that server is healthy */ @@ -1346,15 +1208,18 @@ // Update ZK, ROOT or META if (r.getRegionInfo().isRootRegion()) { RootLocationEditor.setRootLocation(getZooKeeper(), - getServerInfo().getServerAddress()); + this.serverNameFromMasterPOV); } else if (r.getRegionInfo().isMetaRegion()) { - MetaEditor.updateMetaLocation(ct, r.getRegionInfo(), getServerInfo()); + MetaEditor.updateMetaLocation(ct, r.getRegionInfo(), + this.serverNameFromMasterPOV); } else { if (daughter) { // If daughter of a split, update whole row, not just location. - MetaEditor.addDaughter(ct, r.getRegionInfo(), getServerInfo()); + MetaEditor.addDaughter(ct, r.getRegionInfo(), + this.serverNameFromMasterPOV); } else { - MetaEditor.updateRegionLocation(ct, r.getRegionInfo(), + this.serverNameFromMasterPOV); } } } @@ -1429,17 +1294,19 @@ * Method will block until a master is available. 
You can break from this * block by requesting the server stop. * - * @return master address, or null if server has been stopped + * @return master + port, or null if server has been stopped */ - private HServerAddress getMaster() { - HServerAddress masterAddress = null; - while ((masterAddress = masterAddressManager.getMasterAddress()) == null) { + private ServerName getMaster() { + ServerName masterServerName = null; + while ((masterServerName = masterAddressManager.getMasterAddress()) == null) { if (stopped) { return null; } LOG.debug("No master found, will retry"); sleeper.sleep(); } + InetSocketAddress isa = + new InetSocketAddress(masterServerName.getHostname(), masterServerName.getPort()); HMasterRegionInterface master = null; while (!stopped && master == null) { try { @@ -1447,7 +1314,7 @@ public class HRegionServer implements HRegionInterface, HBaseRPCErrorHandler, // should retry indefinitely. master = (HMasterRegionInterface) HBaseRPC.waitForProxy( HMasterRegionInterface.class, HMasterRegionInterface.VERSION, - masterAddress.getInetSocketAddress(), this.conf, -1, + isa, this.conf, -1, this.rpcTimeout, this.rpcTimeout); } catch (IOException e) { e = e instanceof RemoteException ? @@ -1460,9 +1327,9 @@ public class HRegionServer implements HRegionInterface, HBaseRPCErrorHandler, sleeper.sleep(); } } - LOG.info("Connected to master at " + masterAddress); + LOG.info("Connected to master at " + isa); this.hbaseMaster = master; - return masterAddress; + return masterServerName; } /** @@ -1475,35 +1342,31 @@ public class HRegionServer implements HRegionInterface, HBaseRPCErrorHandler, handleReportForDutyResponse(w); return true; } - sleeper.sleep(); - LOG.warn("No response on reportForDuty. Sleeping and then retrying."); return false; } /* * Let the master know we're here Run initialization using parameters passed * us by the master. + * @return A Map of key/value configurations we got from the Master else + * null if we failed to register. 
+ * @throws IOException */ private MapWritable reportForDuty() throws IOException { - HServerAddress masterAddress = null; - while (!stopped && (masterAddress = getMaster()) == null) { + ServerName masterServerName = null; + while (!stopped && (masterServerName = getMaster()) == null) { + LOG.warn("Unable to get master for initialization -- sleeping"); sleeper.sleep(); - LOG.warn("Unable to get master for initialization"); } - MapWritable result = null; long lastMsg = 0; while (!stopped) { try { this.requestCount.set(0); - lastMsg = System.currentTimeMillis(); - ZKUtil.setAddressAndWatch(zooKeeper, - ZKUtil.joinZNode(zooKeeper.rsZNode, ZKUtil.getNodeName(serverInfo)), - this.serverInfo.getServerAddress()); - this.serverInfo.setLoad(buildServerLoad()); - LOG.info("Telling master at " + masterAddress + " that we are up"); - result = this.hbaseMaster.regionServerStartup(this.serverInfo, - EnvironmentEdgeManager.currentTimeMillis()); + LOG.info("Telling master at " + masterServerName + " that we are up"); + lastMsg = EnvironmentEdgeManager.currentTimeMillis(); + int port = this.server.getListenerAddress().getPort(); + result = this.hbaseMaster.regionServerStartup(port, this.startcode, lastMsg); break; } catch (RemoteException e) { IOException ioe = e.unwrapRemoteException(); @@ -1517,8 +1380,6 @@ public class HRegionServer implements HRegionInterface, HBaseRPCErrorHandler, } } catch (IOException e) { LOG.warn("error telling master we are up", e); - } catch (KeeperException e) { - LOG.warn("error putting up ephemeral node in zookeeper", e); } sleeper.sleep(lastMsg); } @@ -1526,22 +1387,18 @@ public class HRegionServer implements HRegionInterface, HBaseRPCErrorHandler, } /** - * Add to the outbound message buffer - * - * When a region splits, we need to tell the master that there are two new - * regions that need to be assigned. 
- * - * We do not need to inform the master about the old region, because we've - * updated the meta or root regions, and the master will pick that up on its - * next rescan of the root or meta tables. + * Report split. */ void reportSplit(HRegionInfo oldRegion, HRegionInfo newRegionA, HRegionInfo newRegionB) { + throw new NotImplementedException("FIX"); + /* this.outboundMsgs.add(new HMsg( HMsg.Type.REGION_SPLIT, oldRegion, newRegionA, newRegionB, Bytes.toBytes("Daughters; " + newRegionA.getRegionNameAsString() + ", " + newRegionB.getRegionNameAsString()))); + */ } /** @@ -2272,6 +2129,44 @@ public class HRegionServer implements HRegionInterface, HBaseRPCErrorHandler, } /** + * @param encodedRegionName + * @return JSON Map of labels to values for passed in encodedRegionName + * @throws IOException + */ + public byte [] getRegionStats(final String encodedRegionName) + throws IOException { + HRegion r = null; + synchronized (this.onlineRegions) { + r = this.onlineRegions.get(encodedRegionName); + } + if (r == null) return null; + ObjectMapper mapper = new ObjectMapper(); + int stores = 0; + int storefiles = 0; + int storefileSizeMB = 0; + int memstoreSizeMB = (int) (r.memstoreSize.get() / 1024 / 1024); + int storefileIndexSizeMB = 0; + synchronized (r.stores) { + stores += r.stores.size(); + for (Store store : r.stores.values()) { + storefiles += store.getStorefilesCount(); + storefileSizeMB += (int) (store.getStorefilesSize() / 1024 / 1024); + storefileIndexSizeMB += (int) (store.getStorefilesIndexSize() / 1024 / 1024); + } + } + Map map = new TreeMap(); + map.put("stores", stores); + map.put("storefiles", storefiles); + map.put("storefileSizeMB", storefileSizeMB); + map.put("storefileIndexSizeMB", storefileIndexSizeMB); + map.put("memstoreSizeMB", memstoreSizeMB); + StringWriter w = new StringWriter(); + mapper.writeValue(w, map); + w.close(); + return Bytes.toBytes(w.toString()); + } + + /** + * For tests and web ui. 
* This method will only work if HRegionServer is in the same JVM as client; * HRegion cannot be serialized to cross an rpc. @@ -2449,18 +2343,11 @@ @Override @QosPriority(priority=HIGH_QOS) public long getProtocolVersion(final String protocol, final long clientVersion) - throws IOException { + throws IOException { if (protocol.equals(HRegionInterface.class.getName())) { return HRegionInterface.VERSION; } - throw new IOException("Unknown protocol to name node: " + protocol); - } - - /** - * @return Queue to which you can add outbound messages. - */ - protected LinkedBlockingQueue getOutboundMsgs() { - return this.outboundMsgs; + throw new IOException("Unknown protocol: " + protocol); } /** @@ -2500,13 +2387,18 @@ } /** - * @return Info on port this server has bound to, etc. + * @return This server's {@link HServerInfo} */ + // TODO: Deprecate and do getServerName instead. public HServerInfo getServerInfo() { - return this.serverInfo; + try { + return getHServerInfo(); + } catch (IOException e) { + LOG.error("Failed to build HServerInfo", e); + } + return null; } - @Override public Result increment(byte[] regionName, Increment increment) throws IOException { @@ -2582,11 +2474,14 @@ } } - /** {@inheritDoc} */ + /** {@inheritDoc} + * @deprecated Use {@link #getServerName()} instead. 
+ */ @Override @QosPriority(priority=HIGH_QOS) public HServerInfo getHServerInfo() throws IOException { - return serverInfo; + return new HServerInfo(new HServerAddress(this.server.getListenerAddress()), + this.startcode, this.webuiport); } @SuppressWarnings("unchecked") @@ -2736,7 +2631,7 @@ public class HRegionServer implements HRegionInterface, HBaseRPCErrorHandler, } public String toString() { - return this.serverInfo.toString(); + return this.server.getListenerAddress().toString(); } /** @@ -2754,8 +2649,8 @@ public class HRegionServer implements HRegionInterface, HBaseRPCErrorHandler, } @Override - public String getServerName() { - return serverInfo.getServerName(); + public ServerName getServerName() { + return this.serverNameFromMasterPOV; } @Override @@ -2779,8 +2674,8 @@ public class HRegionServer implements HRegionInterface, HBaseRPCErrorHandler, */ public static Thread startRegionServer(final HRegionServer hrs) throws IOException { - return startRegionServer(hrs, "regionserver" - + hrs.getServerInfo().getServerAddress().getPort()); + return startRegionServer(hrs, "regionserver" + + hrs.server.getListenerAddress().getPort()); } /** @@ -2839,4 +2734,4 @@ public class HRegionServer implements HRegionInterface, HBaseRPCErrorHandler, new HRegionServerCommandLine(regionServerClass).doMain(args); } -} +} \ No newline at end of file diff --git a/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java b/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java index 1b956ae..ac3e4b8 100644 --- a/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java +++ b/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java @@ -21,7 +21,6 @@ package org.apache.hadoop.hbase.regionserver; import java.io.IOException; -import org.apache.hadoop.hbase.HServerInfo; import org.apache.hadoop.hbase.catalog.CatalogTracker; import org.apache.hadoop.hbase.ipc.HBaseRpcMetrics; import 
org.apache.hadoop.hbase.regionserver.wal.HLog; @@ -61,12 +60,6 @@ public interface RegionServerServices extends OnlineRegions { public FlushRequester getFlushRequester(); /** - * Return data structure that has Server address and startcode. - * @return The HServerInfo for this RegionServer. - */ - public HServerInfo getServerInfo(); - - /** * Tasks to perform after region open to complete deploy of region on * regionserver * @param r Region to open. diff --git a/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerZNodeContent.java b/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerZNodeContent.java new file mode 100644 index 0000000..ce66cb4 --- /dev/null +++ b/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerZNodeContent.java @@ -0,0 +1,73 @@ +/** + * Copyright 2011 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.regionserver; + +import java.io.IOException; +import java.io.Reader; +import java.io.StringReader; +import java.io.StringWriter; +import java.io.Writer; + +import org.apache.hadoop.hbase.util.Bytes; +import org.codehaus.jackson.map.ObjectMapper; + +/** + * Class that contains the content of RegionServer znode. + * Its intentionally all Bean-like so its easy to serialize, deserialize + * using JSON. + */ +public class RegionServerZNodeContent { + private int webuiport = -1; + + RegionServerZNodeContent() { + super(); + } + + public int getWebuiport() { + return webuiport; + } + public void setWebuiport(int webuiport) { + this.webuiport = webuiport; + } + + /** + * @return Data to serialize into rs znode + * @throws IOException + * @param webuiport + * @see {@link #getRSZNodeContent(byte[])} + */ + public static byte [] getRegionServerZNodeBytes(final int webuiport) + throws IOException { + RegionServerZNodeContent data = new RegionServerZNodeContent(); + data.setWebuiport(webuiport); + ObjectMapper mapper = new ObjectMapper(); + Writer w = new StringWriter(); + mapper.writeValue(w, data); + w.close(); + return Bytes.toBytes(w.toString()); + } + + public static RegionServerZNodeContent getRSZNodeContent(final byte [] bytes) + throws IOException { + ObjectMapper mapper = new ObjectMapper(); + Reader reader = new StringReader(Bytes.toString(bytes)); + return mapper.readValue(reader, RegionServerZNodeContent.class); + } +} \ No newline at end of file diff --git a/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java b/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java index a846d06..f18d5f1 100644 --- a/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java +++ b/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java @@ -40,7 +40,6 @@ import java.util.concurrent.ConcurrentSkipListMap; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.atomic.AtomicInteger; 
import java.util.concurrent.atomic.AtomicLong; -import java.util.concurrent.locks.Condition; import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; import java.util.regex.Matcher; @@ -57,8 +56,8 @@ import org.apache.hadoop.fs.Syncable; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.HServerInfo; import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.ClassSize; import org.apache.hadoop.hbase.util.FSUtils; @@ -146,9 +145,6 @@ public class HLog implements Syncable { private Method getNumCurrentReplicas; // refers to DFSOutputStream.getNumCurrentReplicas final static Object [] NO_ARGS = new Object []{}; - // used to indirectly tell syncFs to force the sync - private boolean forceSync = false; - public interface Reader { void init(FileSystem fs, Path path, Configuration c) throws IOException; void close() throws IOException; @@ -1282,36 +1278,10 @@ public class HLog implements Syncable { /** * Construct the HLog directory name * - * @param info HServerInfo for server - * @return the HLog directory name - */ - public static String getHLogDirectoryName(HServerInfo info) { - return getHLogDirectoryName(info.getServerName()); - } - - /** - * Construct the HLog directory name - * - * @param serverAddress - * @param startCode - * @return the HLog directory name - */ - public static String getHLogDirectoryName(String serverAddress, - long startCode) { - if (serverAddress == null || serverAddress.length() == 0) { - return null; - } - return getHLogDirectoryName( - HServerInfo.getServerName(serverAddress, startCode)); - } - - /** - * Construct the HLog directory name - * - * @param serverName + * @param serverName Server name formatted as described in {@link ServerName} * @return the HLog directory name */ - public 
static String getHLogDirectoryName(String serverName) { + public static String getHLogDirectoryName(final String serverName) { StringBuilder dirName = new StringBuilder(HConstants.HREGION_LOGDIR_NAME); dirName.append("/"); dirName.append(serverName); diff --git a/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java b/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java index 548c8eb..6de47e6 100644 --- a/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java +++ b/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java @@ -24,7 +24,7 @@ import java.util.List; import java.util.concurrent.atomic.AtomicBoolean; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HServerAddress; +import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; /** @@ -37,8 +37,7 @@ public class ReplicationPeer { private final String clusterKey; private final String id; - private List regionServers = - new ArrayList(0); + private List regionServers = new ArrayList(0); private final AtomicBoolean peerEnabled = new AtomicBoolean(); // Cannot be final since a new object needs to be recreated when session fails private ZooKeeperWatcher zkw; @@ -82,7 +81,7 @@ public class ReplicationPeer { * for this peer cluster * @return list of addresses */ - public List getRegionServers() { + public List getRegionServers() { return regionServers; } @@ -90,7 +89,7 @@ public class ReplicationPeer { * Set the list of region servers for that peer * @param regionServers list of addresses for the region servers */ - public void setRegionServers(List regionServers) { + public void setRegionServers(List regionServers) { this.regionServers = regionServers; } diff --git a/src/main/java/org/apache/hadoop/hbase/replication/ReplicationZookeeper.java b/src/main/java/org/apache/hadoop/hbase/replication/ReplicationZookeeper.java index f4ae3c3..6f6a32c 100644 --- 
a/src/main/java/org/apache/hadoop/hbase/replication/ReplicationZookeeper.java +++ b/src/main/java/org/apache/hadoop/hbase/replication/ReplicationZookeeper.java @@ -35,14 +35,13 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.HServerAddress; import org.apache.hadoop.hbase.Server; +import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.zookeeper.ZKUtil; import org.apache.hadoop.hbase.zookeeper.ZooKeeperListener; import org.apache.hadoop.hbase.zookeeper.ZooKeeperNodeTracker; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; -import org.apache.hadoop.util.StringUtils; import org.apache.zookeeper.KeeperException; /** @@ -137,7 +136,7 @@ public class ReplicationZookeeper { this.peerClusters = new HashMap(); ZKUtil.createWithParents(this.zookeeper, ZKUtil.joinZNode(this.replicationZNode, this.replicationStateNodeName)); - this.rsServerNameZnode = ZKUtil.joinZNode(rsZNode, server.getServerName()); + this.rsServerNameZnode = ZKUtil.joinZNode(rsZNode, server.getServerName().toString()); ZKUtil.createWithParents(this.zookeeper, this.rsServerNameZnode); connectExistingPeers(); } @@ -204,14 +203,14 @@ public class ReplicationZookeeper { * @param peerClusterId (byte) the cluster to interrogate * @return addresses of all region servers */ - public List getSlavesAddresses(String peerClusterId) + public List getSlavesAddresses(String peerClusterId) throws KeeperException { if (this.peerClusters.size() == 0) { - return new ArrayList(0); + return new ArrayList(0); } ReplicationPeer peer = this.peerClusters.get(peerClusterId); if (peer == null) { - return new ArrayList(0); + return new ArrayList(0); } peer.setRegionServers(fetchSlavesAddresses(peer.getZkw())); return peer.getRegionServers(); @@ -222,10 +221,10 @@ public class ReplicationZookeeper { * 
@param zkw zk connection to use * @return list of region server addresses */ - private List fetchSlavesAddresses(ZooKeeperWatcher zkw) { - List rss = null; + private List fetchSlavesAddresses(ZooKeeperWatcher zkw) { + List rss = null; try { - rss = ZKUtil.listChildrenAndGetAsAddresses(zkw, zkw.rsZNode); + rss = listChildrenAndGetAsServerNames(zkw, zkw.rsZNode); } catch (KeeperException e) { LOG.warn("Cannot get peer's region server addresses", e); } @@ -233,6 +232,37 @@ public class ReplicationZookeeper { } /** + * Lists the children of the specified znode, retrieving the data of each + * child as a server address. + * + * Used to list the currently online regionservers and their addresses. + * + * Sets no watches at all, this method is best effort. + * + * Returns an empty list if the node has no children. Returns null if the + * parent node itself does not exist. + * + * @param zkw zookeeper reference + * @param znode node to get children of as addresses + * @return list of data of children of specified znode, empty if no children, + * null if parent does not exist + * @throws KeeperException if unexpected zookeeper exception + */ + public static List listChildrenAndGetAsServerNames( + ZooKeeperWatcher zkw, String znode) + throws KeeperException { + List children = ZKUtil.listChildrenNoWatch(zkw, znode); + if(children == null) { + return null; + } + List addresses = new ArrayList(children.size()); + for (String child : children) { + addresses.add(new ServerName(child)); + } + return addresses; + } + + /** * This method connects this cluster to another one and registers it * in this region server's replication znode * @param peerId id of the peer cluster diff --git a/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java b/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java index ac9bb77..5f0888b 100644 --- a/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java +++ 
b/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java @@ -44,6 +44,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HServerAddress; import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.Stoppable; import org.apache.hadoop.hbase.client.HConnection; import org.apache.hadoop.hbase.client.HConnectionManager; @@ -201,7 +202,7 @@ public class ReplicationSource extends Thread */ private void chooseSinks() throws KeeperException { this.currentPeers.clear(); - List addresses = + List addresses = this.zkHelper.getSlavesAddresses(peerClusterId); Set setOfAddr = new HashSet(); int nbPeers = (int) (Math.ceil(addresses.size() * ratio)); @@ -211,7 +212,8 @@ public class ReplicationSource extends Thread HServerAddress address; // Make sure we get one address that we don't already have do { - address = addresses.get(this.random.nextInt(addresses.size())); + ServerName sn = addresses.get(this.random.nextInt(addresses.size())); + address = new HServerAddress(sn.getHostname(), sn.getPort()); } while (setOfAddr.contains(address)); LOG.info("Choosing peer " + address); setOfAddr.add(address); diff --git a/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterStatusResource.java b/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterStatusResource.java index 69ed646..47b10bb 100644 --- a/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterStatusResource.java +++ b/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterStatusResource.java @@ -35,8 +35,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.ClusterStatus; -import org.apache.hadoop.hbase.HServerInfo; -import org.apache.hadoop.hbase.HServerLoad; +import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.client.HBaseAdmin; import 
org.apache.hadoop.hbase.rest.model.StorageClusterStatusModel; @@ -73,20 +72,14 @@ public class StorageClusterStatusResource extends ResourceBase { model.setRegions(status.getRegionsCount()); model.setRequests(status.getRequestsCount()); model.setAverageLoad(status.getAverageLoad()); - for (HServerInfo info: status.getServerInfo()) { - HServerLoad load = info.getLoad(); + for (ServerName info: status.getServers()) { StorageClusterStatusModel.Node node = model.addLiveNode( - info.getServerAddress().getHostname() + ":" + - Integer.toString(info.getServerAddress().getPort()), - info.getStartCode(), load.getUsedHeapMB(), - load.getMaxHeapMB()); - node.setRequests(load.getNumberOfRequests()); - for (HServerLoad.RegionLoad region: load.getRegionsLoad()) { - node.addRegion(region.getName(), region.getStores(), - region.getStorefiles(), region.getStorefileSizeMB(), - region.getMemStoreSizeMB(), region.getStorefileIndexSizeMB()); - } + info.getHostname() + ":" + + Integer.toString(info.getPort()), + info.getStartcode(), 0, + 0); + node.setRequests(0); } for (String name: status.getDeadServerNames()) { model.addDeadNode(name); diff --git a/src/main/java/org/apache/hadoop/hbase/util/Addressing.java b/src/main/java/org/apache/hadoop/hbase/util/Addressing.java new file mode 100644 index 0000000..6de1440 --- /dev/null +++ b/src/main/java/org/apache/hadoop/hbase/util/Addressing.java @@ -0,0 +1,73 @@ +/** + * Copyright 2011 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.util; + +import java.net.InetSocketAddress; + +/** + * Utility for network addresses, resolving and naming. + */ +public class Addressing { + public static final String HOSTNAME_PORT_SEPARATOR = ":"; + + /** + * @param hostAndPort Formatted as <hostname> ':' <port> + * @return An InetSocketInstance + */ + public static InetSocketAddress createInetSocketAddressFromHostAndPortStr( + final String hostAndPort) { + return new InetSocketAddress(parseHostname(hostAndPort), parsePort(hostAndPort)); + } + + /** + * @param hostname Server hostname + * @param port Server port + * @return Returns a concatenation of hostname and + * port in following + * form: <hostname> ':' <port> + */ + public static String createHostAndPortStr(final String hostname, final int port) { + return hostname + HOSTNAME_PORT_SEPARATOR + port; + } + + /** + * @param hostAndPort Formatted as <hostname> ':' <port> + * @return The hostname portion of hostAndPort + */ + public static String parseHostname(final String hostAndPort) { + int colonIndex = hostAndPort.lastIndexOf(HOSTNAME_PORT_SEPARATOR); + if (colonIndex < 0) { + throw new IllegalArgumentException("Not a host:port pair: " + hostAndPort); + } + return hostAndPort.substring(0, colonIndex); + } + + /** + * @param hostAndPort Formatted as <hostname> ':' <port> + * @return The port portion of hostAndPort + */ + public static int parsePort(final String hostAndPort) { + int colonIndex = hostAndPort.lastIndexOf(HOSTNAME_PORT_SEPARATOR); + if (colonIndex < 0) { + throw new 
IllegalArgumentException("Not a host:port pair: " + hostAndPort); + } + return Integer.parseInt(hostAndPort.substring(colonIndex + 1)); + } +} \ No newline at end of file diff --git a/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java b/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java index 8b64738..550b05c 100644 --- a/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java +++ b/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java @@ -42,17 +42,18 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.HServerAddress; -import org.apache.hadoop.hbase.HServerInfo; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.MasterNotRunningException; +import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.ZooKeeperConnectionException; +import org.apache.hadoop.hbase.catalog.MetaReader; import org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.client.HConnection; import org.apache.hadoop.hbase.client.HConnectionManager; import org.apache.hadoop.hbase.client.MetaScanner; -import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitor; +import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.ipc.HRegionInterface; import org.apache.hadoop.hbase.regionserver.wal.HLog; import org.apache.hadoop.hbase.zookeeper.ZKTable; @@ -158,12 +159,12 @@ public class HBaseFsck { } // From the master, get a list of all known live region servers - Collection regionServers = status.getServerInfo(); + Collection regionServers = status.getServers(); errors.print("Number of live region servers: " + regionServers.size()); if (details) { - for (HServerInfo rsinfo: regionServers) { - errors.print(" " + rsinfo.getServerName()); + for (ServerName rsinfo: regionServers) { + errors.print(" " + 
rsinfo); } } @@ -321,20 +322,21 @@ public class HBaseFsck { * @param regionServerList - the list of region servers to connect to * @throws IOException if a remote or network exception occurs */ - void processRegionServers(Collection regionServerList) + void processRegionServers(Collection regionServerList) throws IOException { // loop to contact each region server - for (HServerInfo rsinfo: regionServerList) { + for (ServerName rsinfo: regionServerList) { errors.progress(); try { - HRegionInterface server = connection.getHRegionConnection( - rsinfo.getServerAddress()); + HServerAddress hsa = + new HServerAddress(rsinfo.getHostname(), rsinfo.getPort()); + HRegionInterface server = connection.getHRegionConnection(hsa); // list all online regions from this region server List regions = server.getOnlineRegions(); if (details) { - errors.detail("RegionServer: " + rsinfo.getServerName() + + errors.detail("RegionServer: " + rsinfo + " number of regions: " + regions.size()); for (HRegionInfo rinfo: regions) { errors.detail(" " + rinfo.getRegionNameAsString() + @@ -348,10 +350,10 @@ public class HBaseFsck { // check to see if the existance of this region matches the region in META for (HRegionInfo r:regions) { HbckInfo hbi = getOrCreateInfo(r.getEncodedName()); - hbi.deployedOn.add(rsinfo.getServerAddress()); + hbi.deployedOn.add(hsa); } } catch (IOException e) { // unable to connect to the region server. - errors.reportError("\nRegionServer:" + rsinfo.getServerName() + + errors.reportError("\nRegionServer:" + rsinfo + " Unable to fetch region information. 
" + e); } } @@ -696,35 +698,22 @@ public class HBaseFsck { // record the latest modification of this META record long ts = Collections.max(result.list(), comp).getTimestamp(); - - // record region details - byte [] value = result.getValue(HConstants.CATALOG_FAMILY, - HConstants.REGIONINFO_QUALIFIER); - if (value == null || value.length == 0) { + Pair pair = + MetaReader.metaRowToRegionPair(result); + if (pair == null || pair.getFirst() == null) { emptyRegionInfoQualifiers.add(result); return true; } - HRegionInfo info = Writables.getHRegionInfo(value); - HServerAddress server = null; - byte[] startCode = null; - - // record assigned region server - value = result.getValue(HConstants.CATALOG_FAMILY, - HConstants.SERVER_QUALIFIER); - if (value != null && value.length > 0) { - String address = Bytes.toString(value); - server = new HServerAddress(address); - } - - // record region's start key - value = result.getValue(HConstants.CATALOG_FAMILY, - HConstants.STARTCODE_QUALIFIER); - if (value != null) { - startCode = value; + HServerAddress hsa = null; + byte [] startCode = null; + if (pair.getSecond() != null) { + ServerName sn = pair.getSecond(); + hsa = new HServerAddress(sn.getHostname(), sn.getPort()); + startCode = Bytes.toBytes(sn.getStartcode()); } - MetaEntry m = new MetaEntry(info, server, startCode, ts); + MetaEntry m = new MetaEntry(pair.getFirst(), hsa, startCode, ts); HbckInfo hbInfo = new HbckInfo(m); - HbckInfo previous = regionInfo.put(info.getEncodedName(), hbInfo); + HbckInfo previous = regionInfo.put(pair.getFirst().getEncodedName(), hbInfo); if (previous != null) { throw new IOException("Two entries in META are same " + previous); } diff --git a/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java b/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java index b624d28..ea55521 100644 --- a/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java +++ b/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java @@ -82,7 
+82,7 @@ public class HBaseFsckRepair { throws ZooKeeperConnectionException, KeeperException, IOException { ZKAssign.createOrForceNodeOffline( HConnectionManager.getConnection(conf).getZooKeeperWatcher(), - region, HConstants.HBCK_CODE_NAME); + region, HConstants.HBCK_CODE_SERVERNAME); } private static void closeRegionSilentlyAndWait(Configuration conf, diff --git a/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java b/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java index baf0c27..5bd94fb 100644 --- a/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java +++ b/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java @@ -189,7 +189,7 @@ public class JVMClusterUtil { while (true) { for (JVMClusterUtil.MasterThread t : masters) { if (t.master.isActiveMaster()) { - return t.master.getMasterAddress().toString(); + return t.master.getServerName().toString(); } } try { diff --git a/src/main/java/org/apache/hadoop/hbase/zookeeper/RegionServerTracker.java b/src/main/java/org/apache/hadoop/hbase/zookeeper/RegionServerTracker.java index 0437484..2f49ab4 100644 --- a/src/main/java/org/apache/hadoop/hbase/zookeeper/RegionServerTracker.java +++ b/src/main/java/org/apache/hadoop/hbase/zookeeper/RegionServerTracker.java @@ -19,14 +19,19 @@ */ package org.apache.hadoop.hbase.zookeeper; +import java.io.IOException; +import java.util.ArrayList; import java.util.List; +import java.util.NavigableMap; +import java.util.TreeMap; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.Abortable; -import org.apache.hadoop.hbase.HServerAddress; -import org.apache.hadoop.hbase.HServerInfo; +import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.master.ServerManager; +import org.apache.hadoop.hbase.regionserver.RegionServerZNodeContent; +import org.apache.hadoop.hbase.zookeeper.ZKUtil.NodeAndData; import org.apache.zookeeper.KeeperException; /** @@ -41,7 +46,8 @@ import 
org.apache.zookeeper.KeeperException; */ public class RegionServerTracker extends ZooKeeperListener { private static final Log LOG = LogFactory.getLog(RegionServerTracker.class); - + private NavigableMap regionServers = + new TreeMap(); private ServerManager serverManager; private Abortable abortable; @@ -58,32 +64,58 @@ public class RegionServerTracker extends ZooKeeperListener { *

All RSs will be tracked after this method is called. * * @throws KeeperException + * @throws IOException */ - public void start() throws KeeperException { + public void start() throws KeeperException, IOException { watcher.registerListener(this); - ZKUtil.watchAndGetNewChildren(watcher, watcher.rsZNode); + List servers = + ZKUtil.watchAndGetNewChildren(watcher, watcher.rsZNode); + add(servers); + } + + private void add(final List servers) throws IOException { + synchronized(this.regionServers) { + this.regionServers.clear(); + for (NodeAndData n: servers) { + ServerName sn = new ServerName(ZKUtil.getNodeName(n.getNode())); + RegionServerZNodeContent content = + RegionServerZNodeContent.getRSZNodeContent(n.getData()); + this.regionServers.put(sn, content); + } + } + } + + private void remove(final ServerName sn) { + synchronized(this.regionServers) { + this.regionServers.remove(sn); + } } @Override public void nodeDeleted(String path) { - if(path.startsWith(watcher.rsZNode)) { + if (path.startsWith(watcher.rsZNode)) { String serverName = ZKUtil.getNodeName(path); LOG.info("RegionServer ephemeral node deleted, processing expiration [" + - serverName + "]"); - HServerInfo hsi = serverManager.getServerInfo(serverName); - if(hsi == null) { - LOG.info("No HServerInfo found for " + serverName); + serverName + "]"); + ServerName sn = new ServerName(serverName); + if (!serverManager.isServerOnline(sn)) { + LOG.info(serverName.toString() + " is not online"); return; } - serverManager.expireServer(hsi); + remove(sn); + this.serverManager.expireServer(sn); } } @Override public void nodeChildrenChanged(String path) { - if(path.equals(watcher.rsZNode)) { + if (path.equals(watcher.rsZNode)) { try { - ZKUtil.watchAndGetNewChildren(watcher, watcher.rsZNode); + List servers = + ZKUtil.watchAndGetNewChildren(watcher, watcher.rsZNode); + add(servers); + } catch (IOException e) { + abortable.abort("Unexpected zk exception getting RS nodes", e); } catch (KeeperException e) { 
abortable.abort("Unexpected zk exception getting RS nodes", e); } @@ -92,10 +124,12 @@ public class RegionServerTracker extends ZooKeeperListener { /** * Gets the online servers. - * @return list of online servers from zk + * @return list of online servers * @throws KeeperException */ - public List getOnlineServers() throws KeeperException { - return ZKUtil.listChildrenAndGetAsAddresses(watcher, watcher.rsZNode); + public List getOnlineServers() { + synchronized (this.regionServers) { + return new ArrayList(this.regionServers.keySet()); + } } -} +} \ No newline at end of file diff --git a/src/main/java/org/apache/hadoop/hbase/zookeeper/RootRegionTracker.java b/src/main/java/org/apache/hadoop/hbase/zookeeper/RootRegionTracker.java index 692b608..4007f14 100644 --- a/src/main/java/org/apache/hadoop/hbase/zookeeper/RootRegionTracker.java +++ b/src/main/java/org/apache/hadoop/hbase/zookeeper/RootRegionTracker.java @@ -20,9 +20,8 @@ package org.apache.hadoop.hbase.zookeeper; import org.apache.hadoop.hbase.Abortable; -import org.apache.hadoop.hbase.HServerAddress; +import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.catalog.RootLocationEditor; -import org.apache.hadoop.hbase.regionserver.RegionServerServices; import org.apache.hadoop.hbase.util.Bytes; /** @@ -54,31 +53,33 @@ public class RootRegionTracker extends ZooKeeperNodeTracker { /** * Gets the root region location, if available. Null if not. Does not block. - * @return server address for server hosting root region, null if none available + * @return server name * @throws InterruptedException */ - public HServerAddress getRootRegionLocation() throws InterruptedException { - return dataToHServerAddress(super.getData()); + public ServerName getRootRegionLocation() throws InterruptedException { + return new ServerName(dataToString(super.getData())); } /** * Gets the root region location, if available, and waits for up to the * specified timeout if not immediately available. 
* @param timeout maximum time to wait, in millis - * @return server address for server hosting root region, null if timed out + * @return server name for server hosting root region formatted as per + * {@link ServerName}, or null if none available * @throws InterruptedException if interrupted while waiting */ - public HServerAddress waitRootRegionLocation(long timeout) + public ServerName waitRootRegionLocation(long timeout) throws InterruptedException { - return dataToHServerAddress(super.blockUntilAvailable(timeout)); + String str = dataToString(super.blockUntilAvailable(timeout)); + return str == null? null: new ServerName(str); } /* * @param data * @return Returns null if data is null else converts passed data - * to an HServerAddress instance. + * to a String instance. */ - private static HServerAddress dataToHServerAddress(final byte [] data) { - return data == null ? null: new HServerAddress(Bytes.toString(data)); + private static String dataToString(final byte [] data) { + return data == null ? 
null: Bytes.toString(data); } -} +} \ No newline at end of file diff --git a/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKAssign.java b/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKAssign.java index 1ac083d..08066bb 100644 --- a/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKAssign.java +++ b/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKAssign.java @@ -24,6 +24,7 @@ import java.util.List; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.executor.RegionTransitionData; import org.apache.hadoop.hbase.executor.EventHandler.EventType; import org.apache.zookeeper.AsyncCallback; @@ -130,13 +131,13 @@ public class ZKAssign { * @throws KeeperException.NodeExistsException if node already exists */ public static void createNodeOffline(ZooKeeperWatcher zkw, HRegionInfo region, - String serverName) + ServerName serverName) throws KeeperException, KeeperException.NodeExistsException { createNodeOffline(zkw, region, serverName, EventType.M_ZK_REGION_OFFLINE); } public static void createNodeOffline(ZooKeeperWatcher zkw, HRegionInfo region, - String serverName, final EventType event) + ServerName serverName, final EventType event) throws KeeperException, KeeperException.NodeExistsException { LOG.debug(zkw.prefix("Creating unassigned node for " + region.getEncodedName() + " in OFFLINE state")); @@ -165,7 +166,7 @@ public class ZKAssign { * @throws KeeperException.NodeExistsException if node already exists */ public static void asyncCreateNodeOffline(ZooKeeperWatcher zkw, - HRegionInfo region, String serverName, + HRegionInfo region, ServerName serverName, final AsyncCallback.StringCallback cb, final Object ctx) throws KeeperException { LOG.debug(zkw.prefix("Async create of unassigned node for " + @@ -198,7 +199,7 @@ public class ZKAssign { * @throws KeeperException.NoNodeException if node does not exist */ public 
static void forceNodeOffline(ZooKeeperWatcher zkw, HRegionInfo region, - String serverName) + ServerName serverName) throws KeeperException, KeeperException.NoNodeException { LOG.debug(zkw.prefix("Forcing existing unassigned node for " + region.getEncodedName() + " to OFFLINE state")); @@ -231,7 +232,7 @@ public class ZKAssign { * @throws KeeperException.NodeExistsException if node already exists */ public static boolean createOrForceNodeOffline(ZooKeeperWatcher zkw, - HRegionInfo region, String serverName) + HRegionInfo region, ServerName serverName) throws KeeperException { LOG.debug(zkw.prefix("Creating (or updating) unassigned node for " + region.getEncodedName() + " with OFFLINE state")); @@ -464,7 +465,7 @@ public class ZKAssign { * @throws KeeperException.NodeExistsException if node already exists */ public static int createNodeClosing(ZooKeeperWatcher zkw, HRegionInfo region, - String serverName) + ServerName serverName) throws KeeperException, KeeperException.NodeExistsException { LOG.debug(zkw.prefix("Creating unassigned node for " + region.getEncodedName() + " in a CLOSING state")); @@ -506,7 +507,7 @@ public class ZKAssign { * @throws KeeperException if unexpected zookeeper exception */ public static int transitionNodeClosed(ZooKeeperWatcher zkw, - HRegionInfo region, String serverName, int expectedVersion) + HRegionInfo region, ServerName serverName, int expectedVersion) throws KeeperException { return transitionNode(zkw, region, serverName, EventType.RS_ZK_REGION_CLOSING, @@ -540,14 +541,14 @@ public class ZKAssign { * @throws KeeperException if unexpected zookeeper exception */ public static int transitionNodeOpening(ZooKeeperWatcher zkw, - HRegionInfo region, String serverName) + HRegionInfo region, ServerName serverName) throws KeeperException { return transitionNodeOpening(zkw, region, serverName, EventType.M_ZK_REGION_OFFLINE); } public static int transitionNodeOpening(ZooKeeperWatcher zkw, - HRegionInfo region, String serverName, final EventType 
beginState) + HRegionInfo region, ServerName serverName, final EventType beginState) throws KeeperException { return transitionNode(zkw, region, serverName, beginState, EventType.RS_ZK_REGION_OPENING, -1); @@ -580,7 +581,7 @@ public class ZKAssign { * @throws KeeperException if unexpected zookeeper exception */ public static int retransitionNodeOpening(ZooKeeperWatcher zkw, - HRegionInfo region, String serverName, int expectedVersion) + HRegionInfo region, ServerName serverName, int expectedVersion) throws KeeperException { return transitionNode(zkw, region, serverName, EventType.RS_ZK_REGION_OPENING, @@ -616,7 +617,7 @@ public class ZKAssign { * @throws KeeperException if unexpected zookeeper exception */ public static int transitionNodeOpened(ZooKeeperWatcher zkw, - HRegionInfo region, String serverName, int expectedVersion) + HRegionInfo region, ServerName serverName, int expectedVersion) throws KeeperException { return transitionNode(zkw, region, serverName, EventType.RS_ZK_REGION_OPENING, @@ -652,7 +653,7 @@ public class ZKAssign { * @throws KeeperException if unexpected zookeeper exception */ public static int transitionNode(ZooKeeperWatcher zkw, HRegionInfo region, - String serverName, EventType beginState, EventType endState, + ServerName serverName, EventType beginState, EventType endState, int expectedVersion) throws KeeperException { String encoded = region.getEncodedName(); diff --git a/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java b/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java index ead223f..79b5a18 100644 --- a/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java +++ b/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java @@ -33,8 +33,6 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.HServerAddress; -import org.apache.hadoop.hbase.HServerInfo; import 
org.apache.hadoop.hbase.executor.RegionTransitionData; import org.apache.hadoop.hbase.util.Bytes; import org.apache.zookeeper.AsyncCallback; @@ -126,19 +124,6 @@ public class ZKUtil { } /** - * Get the unique node-name for the specified regionserver. - * - * Used when a server puts up an ephemeral node for itself and needs to use - * a unique name. - * - * @param serverInfo server information - * @return unique, zookeeper-safe znode path for the server instance - */ - public static String getNodeName(HServerInfo serverInfo) { - return serverInfo.getServerName(); - } - - /** * Get the name of the current node from the specified fully-qualified path. * @param path fully-qualified path * @return name of the current node @@ -333,38 +318,6 @@ public class ZKUtil { } /** - * Lists the children of the specified znode, retrieving the data of each - * child as a server address. - * - * Used to list the currently online regionservers and their addresses. - * - * Sets no watches at all, this method is best effort. - * - * Returns an empty list if the node has no children. Returns null if the - * parent node itself does not exist. - * - * @param zkw zookeeper reference - * @param znode node to get children of as addresses - * @return list of data of children of specified znode, empty if no children, - * null if parent does not exist - * @throws KeeperException if unexpected zookeeper exception - */ - public static List listChildrenAndGetAsAddresses( - ZooKeeperWatcher zkw, String znode) - throws KeeperException { - List children = listChildrenNoWatch(zkw, znode); - if(children == null) { - return null; - } - List addresses = - new ArrayList(children.size()); - for(String child : children) { - addresses.add(getDataAsAddress(zkw, joinZNode(znode, child))); - } - return addresses; - } - - /** * Lists the children of the specified znode without setting any watches. * * Used to list the currently online regionservers and their addresses. 
@@ -602,32 +555,6 @@ public class ZKUtil { } /** - * Get the data at the specified znode, deserialize it as an HServerAddress, - * and set a watch. - * - * Returns the data as a server address and sets a watch if the node exists. - * Returns null and no watch is set if the node does not exist or there is an - * exception. - * - * @param zkw zk reference - * @param znode path of node - * @return data of the specified node as a server address, or null - * @throws KeeperException if unexpected zookeeper exception - */ - public static HServerAddress getDataAsAddress(ZooKeeperWatcher zkw, - String znode) - throws KeeperException { - byte [] data = getDataAndWatch(zkw, znode); - if(data == null) { - return null; - } - String addrString = Bytes.toString(data); - LOG.debug(zkw.prefix("Read server address from znode " + znode + ": " + - addrString)); - return new HServerAddress(addrString); - } - - /** * Update the data of an existing node with the expected version to have the * specified data. * @@ -657,31 +584,6 @@ public class ZKUtil { // /** - * Set the specified znode to be an ephemeral node carrying the specified - * server address. Used by masters for their ephemeral node and regionservers - * for their ephemeral node. - * - * If the node is created successfully, a watcher is also set on the node. - * - * If the node is not created successfully because it already exists, this - * method will also set a watcher on the node. - * - * If there is another problem, a KeeperException will be thrown. 
- * - * @param zkw zk reference - * @param znode path of node - * @param address server address - * @return true if address set, false if not, watch set in both cases - * @throws KeeperException if unexpected zookeeper exception - */ - public static boolean setAddressAndWatch(ZooKeeperWatcher zkw, - String znode, HServerAddress address) - throws KeeperException { - return createEphemeralNodeAndWatch(zkw, znode, - Bytes.toBytes(address.toString())); - } - - /** * Sets the data of the existing znode to be the specified data. Ensures that * the current data has the specified expected version. * @@ -745,8 +647,7 @@ public class ZKUtil { * @param data data to set for node * @throws KeeperException if unexpected zookeeper exception */ - public static void setData(ZooKeeperWatcher zkw, String znode, - byte [] data) + public static void setData(ZooKeeperWatcher zkw, String znode, byte [] data) throws KeeperException, KeeperException.NoNodeException { setData(zkw, znode, data, -1); } @@ -1012,10 +913,9 @@ public class ZKUtil { public static void deleteChildrenRecursively(ZooKeeperWatcher zkw, String node) throws KeeperException { List children = ZKUtil.listChildrenNoWatch(zkw, node); - if(children != null || !children.isEmpty()) { - for(String child : children) { - deleteNodeRecursively(zkw, joinZNode(node, child)); - } + if (children == null || children.isEmpty()) return; + for(String child : children) { + deleteNodeRecursively(zkw, joinZNode(node, child)); } } @@ -1029,13 +929,12 @@ public class ZKUtil { try { sb.append("HBase is rooted at ").append(zkw.baseZNode); sb.append("\nMaster address: ").append( - getDataAsAddress(zkw, zkw.masterAddressZNode)); + Bytes.toStringBinary(getData(zkw, zkw.masterAddressZNode))); sb.append("\nRegion server holding ROOT: ").append( - getDataAsAddress(zkw, zkw.rootServerZNode)); + Bytes.toStringBinary(getData(zkw, zkw.rootServerZNode))); sb.append("\nRegion servers:"); - for (HServerAddress address : listChildrenAndGetAsAddresses(zkw, - 
zkw.rsZNode)) { - sb.append("\n ").append(address); + for (String child: listChildrenNoWatch(zkw, zkw.rsZNode)) { + sb.append("\n ").append(child); } sb.append("\nQuorum Server Statistics:"); String[] servers = zkw.getQuorum().split(","); diff --git a/src/main/resources/hbase-webapps/master/master.jsp b/src/main/resources/hbase-webapps/master/master.jsp index ed38ff2..6f9130e 100644 --- a/src/main/resources/hbase-webapps/master/master.jsp +++ b/src/main/resources/hbase-webapps/master/master.jsp @@ -7,16 +7,14 @@ import="org.apache.hadoop.hbase.util.FSUtils" import="org.apache.hadoop.hbase.master.HMaster" import="org.apache.hadoop.hbase.HConstants" + import="org.apache.hadoop.hbase.ServerName" import="org.apache.hadoop.hbase.client.HBaseAdmin" - import="org.apache.hadoop.hbase.HServerInfo" - import="org.apache.hadoop.hbase.HServerAddress" import="org.apache.hadoop.hbase.HTableDescriptor" %><% HMaster master = (HMaster)getServletContext().getAttribute(HMaster.MASTER); Configuration conf = master.getConfiguration(); - HServerAddress rootLocation = master.getCatalogTracker().getRootLocation(); + ServerName rootLocation = master.getCatalogTracker().getRootLocation(); boolean metaOnline = master.getCatalogTracker().getMetaLocation() != null; - Map serverToServerInfos = - master.getServerManager().getOnlineServers(); + List servers = master.getServerManager().getOnlineServersList(); int interval = conf.getInt("hbase.regionserver.msginterval", 1000)/1000; if (interval == 0) { interval = 1; @@ -31,12 +29,12 @@ "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> -HBase Master: <%= master.getMasterAddress().getHostname()%>:<%= master.getMasterAddress().getPort() %> +HBase Master: <%= master.getServerName().getHostAndPort() %> -

Master: <%=master.getMasterAddress().getHostname()%>:<%=master.getMasterAddress().getPort()%>

+

Master: <%=master.getServerName().getHostname()%>:<%=master.getServerName().getPort()%>

@@ -67,7 +65,7 @@ Hadoop Version<%= org.apache.hadoop.util.VersionInfo.getVersion() %>, r<%= org.apache.hadoop.util.VersionInfo.getRevision() %>Hadoop version and svn revision Hadoop Compiled<%= org.apache.hadoop.util.VersionInfo.getDate() %>, <%= org.apache.hadoop.util.VersionInfo.getUser() %>When Hadoop version was compiled and by whom HBase Root Directory<%= FSUtils.getRootDir(master.getConfiguration()).toString() %>Location of HBase home directory -Load average<%= StringUtils.limitDecimalTo2(master.getServerManager().getAverageLoad()) %>Average number of regions per regionserver. Naive computation. +Load average<%= StringUtils.limitDecimalTo2(master.getAverageLoad()) %>Average number of regions per regionserver. Naive computation. <% if (showFragmentation) { %> Fragmentation<%= frags.get("-TOTAL-") != null ? frags.get("-TOTAL-").intValue() + "%" : "n/a" %>Overall fragmentation of all tables, including .META. and -ROOT-. <% } %> @@ -132,26 +130,27 @@ <% } %>

Region Servers

-<% if (serverToServerInfos != null && serverToServerInfos.size() > 0) { %> +<% if (servers != null && servers.size() > 0) { %> <% int totalRegions = 0; int totalRequests = 0; %> - -<% String[] serverNames = serverToServerInfos.keySet().toArray(new String[serverToServerInfos.size()]); + +<% ServerName [] serverNames = servers.toArray(new ServerName[servers.size()]); Arrays.sort(serverNames); - for (String serverName: serverNames) { - HServerInfo hsi = serverToServerInfos.get(serverName); - String hostname = hsi.getServerAddress().getHostname() + ":" + hsi.getInfoPort(); + for (ServerName serverName: serverNames) { + // HARDCODED FOR NOW; FIX -- READ FROM ZK + String hostname = serverName.getHostname() + ":60020"; String url = "http://" + hostname + "/"; - totalRegions += hsi.getLoad().getNumberOfRegions(); - totalRequests += hsi.getLoad().getNumberOfRequests() / interval; - long startCode = hsi.getStartCode(); + // TODO: FIX + totalRegions += 0; + totalRequests += 0; + long startCode = serverName.getStartcode(); %> - + <% } %> - +
AddressStart CodeLoad
AddressStart CodeLoad
<%= hostname %><%= startCode %><%= hsi.getLoad().toString(interval) %>
<%= hostname %><%= startCode %><%= 0 %>
Total: servers: <%= serverToServerInfos.size() %> requests=<%= totalRequests %>, regions=<%= totalRegions %>
Total: servers: <%= servers.size() %> requests=<%= totalRequests %>, regions=<%= totalRegions %>

Load is requests per second and count of regions loaded

diff --git a/src/main/resources/hbase-webapps/master/table.jsp b/src/main/resources/hbase-webapps/master/table.jsp index f312004..fa0611f 100644 --- a/src/main/resources/hbase-webapps/master/table.jsp +++ b/src/main/resources/hbase-webapps/master/table.jsp @@ -5,8 +5,10 @@ import="org.apache.hadoop.hbase.client.HTable" import="org.apache.hadoop.hbase.client.HBaseAdmin" import="org.apache.hadoop.hbase.HRegionInfo" - import="org.apache.hadoop.hbase.HServerAddress" + import="org.apache.hadoop.hbase.ServerName" import="org.apache.hadoop.hbase.HServerInfo" + import="org.apache.hadoop.hbase.HServerAddress" + import="org.apache.hadoop.hbase.ServerName" import="org.apache.hadoop.hbase.io.ImmutableBytesWritable" import="org.apache.hadoop.hbase.master.HMaster" import="org.apache.hadoop.hbase.util.Bytes" @@ -19,7 +21,7 @@ String tableName = request.getParameter("name"); HTable table = new HTable(conf, tableName); String tableHeader = "

Table Regions

"; - HServerAddress rl = master.getCatalogTracker().getRootLocation(); + ServerName rl = master.getCatalogTracker().getRootLocation(); boolean showFragmentation = conf.getBoolean("hbase.master.ui.fragmentation.enabled", false); Map frags = null; if (showFragmentation) { @@ -80,8 +82,9 @@ if(tableName.equals(Bytes.toString(HConstants.ROOT_TABLE_NAME))) { %> <%= tableHeader %> +// HARDCODED FOR NOW TODO: FIX GET FROM ZK <% - int infoPort = master.getServerManager().getHServerInfo(rl).getInfoPort(); + int infoPort = 60020; String url = "http://" + rl.getHostname() + ":" + infoPort + "/"; %> @@ -99,9 +102,9 @@ <% // NOTE: Presumes one meta region only. HRegionInfo meta = HRegionInfo.FIRST_META_REGIONINFO; - HServerAddress metaLocation = master.getCatalogTracker().getMetaLocation(); + ServerName metaLocation = master.getCatalogTracker().getMetaLocation(); for (int i = 0; i < 1; i++) { - int infoPort = master.getServerManager().getHServerInfo(metaLocation).getInfoPort(); + int infoPort = 60020; // HARDCODED FOR NOW -- TODO FIX String url = "http://" + metaLocation.getHostname() + ":" + infoPort + "/"; %> @@ -141,16 +144,12 @@ HRegionInfo regionInfo = hriEntry.getKey(); HServerAddress addr = hriEntry.getValue(); - int infoPort = 0; + int infoPort = 60020; // FIX String urlRegionServer = null; if (addr != null) { - HServerInfo info = master.getServerManager().getHServerInfo(addr); - if (info != null) { - infoPort = info.getInfoPort(); - urlRegionServer = + urlRegionServer = "http://" + addr.getHostname().toString() + ":" + infoPort + "/"; - } } %> diff --git a/src/main/resources/hbase-webapps/regionserver/regionserver.jsp b/src/main/resources/hbase-webapps/regionserver/regionserver.jsp index 68d4e42..8de2f31 100644 --- a/src/main/resources/hbase-webapps/regionserver/regionserver.jsp +++ b/src/main/resources/hbase-webapps/regionserver/regionserver.jsp @@ -8,7 +8,6 @@ import="org.apache.hadoop.hbase.util.Bytes" import="org.apache.hadoop.hbase.HConstants" 
import="org.apache.hadoop.hbase.HServerInfo" - import="org.apache.hadoop.hbase.HServerLoad" import="org.apache.hadoop.hbase.HRegionInfo" %><% HRegionServer regionServer = (HRegionServer)getServletContext().getAttribute(HRegionServer.REGIONSERVER); HServerInfo serverInfo = null; @@ -52,11 +51,11 @@ <% Collections.sort(onlineRegions); for (HRegionInfo r: onlineRegions) { - HServerLoad.RegionLoad load = regionServer.createRegionLoad(r.getEncodedName()); + byte [] load = regionServer.getRegionStats(r.getEncodedName()); %> - + <% } %>
NameRegion ServerStart KeyEnd Key
<%= r.getRegionNameAsString() %> <%= Bytes.toStringBinary(r.getStartKey()) %><%= Bytes.toStringBinary(r.getEndKey()) %><%= load.toString() %><%= Bytes.toString(load) %>
diff --git a/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java b/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java index 9ad3697..24c059f 100644 --- a/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java +++ b/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java @@ -21,11 +21,8 @@ package org.apache.hadoop.hbase; import java.io.IOException; import java.security.PrivilegedAction; -import java.security.PrivilegedExceptionAction; import java.util.ArrayList; import java.util.List; -import java.util.Map; -import java.util.concurrent.ConcurrentHashMap; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -39,9 +36,7 @@ import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.JVMClusterUtil; import org.apache.hadoop.hbase.util.Threads; -import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.io.MapWritable; -import org.apache.zookeeper.KeeperException; /** * This class creates a single process HBase cluster. @@ -86,75 +81,6 @@ public class MiniHBaseCluster { } /** - * Override Master so can add inject behaviors testing. - */ - public static class MiniHBaseClusterMaster extends HMaster { - private final Map> messages = - new ConcurrentHashMap>(); - - private final Map exceptions = - new ConcurrentHashMap(); - - public MiniHBaseClusterMaster(final Configuration conf) - throws IOException, KeeperException, InterruptedException { - super(conf); - } - - /** - * Add a message to send to a regionserver next time it checks in. - * @param hsi RegionServer's HServerInfo. - * @param msg Message to add. 
- */ - void addMessage(final HServerInfo hsi, HMsg msg) { - synchronized(this.messages) { - List hmsgs = this.messages.get(hsi); - if (hmsgs == null) { - hmsgs = new ArrayList(); - this.messages.put(hsi, hmsgs); - } - hmsgs.add(msg); - } - } - - void addException(final HServerInfo hsi, final IOException ex) { - this.exceptions.put(hsi, ex); - } - - /** - * This implementation is special, exceptions will be treated first and - * message won't be sent back to the region servers even if some are - * specified. - * @param hsi the rs - * @param msgs Messages to add to - * @return - * @throws IOException will be throw if any added for this region server - */ - @Override - protected HMsg[] adornRegionServerAnswer(final HServerInfo hsi, - final HMsg[] msgs) throws IOException { - IOException ex = this.exceptions.remove(hsi); - if (ex != null) { - throw ex; - } - HMsg [] answerMsgs = msgs; - synchronized (this.messages) { - List hmsgs = this.messages.get(hsi); - if (hmsgs != null && !hmsgs.isEmpty()) { - int size = answerMsgs.length; - HMsg [] newAnswerMsgs = new HMsg[size + hmsgs.size()]; - System.arraycopy(answerMsgs, 0, newAnswerMsgs, 0, answerMsgs.length); - for (int i = 0; i < hmsgs.size(); i++) { - newAnswerMsgs[answerMsgs.length + i] = hmsgs.get(i); - } - answerMsgs = newAnswerMsgs; - hmsgs.clear(); - } - } - return super.adornRegionServerAnswer(hsi, answerMsgs); - } - } - - /** * Subclass so can get at protected methods (none at moment). Also, creates * a FileSystem instance per instantiation. Adds a shutdown own FileSystem * on the way out. Shuts down own Filesystem only, not All filesystems as @@ -170,10 +96,6 @@ public class MiniHBaseCluster { this.user = User.getCurrent(); } - public void setHServerInfo(final HServerInfo hsi) { - this.serverInfo = hsi; - } - /* * @param c * @param currentfs We return this if we did not make a new one. 
@@ -260,8 +182,7 @@ public class MiniHBaseCluster { try { // start up a LocalHBaseCluster hbaseCluster = new LocalHBaseCluster(conf, nMasterNodes, 0, - MiniHBaseCluster.MiniHBaseClusterMaster.class, - MiniHBaseCluster.MiniHBaseClusterRegionServer.class); + HMaster.class, MiniHBaseCluster.MiniHBaseClusterRegionServer.class); // manually add the regionservers as other users for (int i=0; i msgs = new ArrayList(); - HMsg hmsg = null; - final int size = 10; - for (int i = 0; i < size; i++) { - byte [] b = Bytes.toBytes(i); - hmsg = new HMsg(HMsg.Type.STOP_REGIONSERVER, - new HRegionInfo(new HTableDescriptor(Bytes.toBytes("test")), b, b)); - msgs.add(hmsg); - } - assertEquals(size, msgs.size()); - int index = msgs.indexOf(hmsg); - assertNotSame(-1, index); - msgs.remove(index); - assertEquals(size - 1, msgs.size()); - byte [] other = Bytes.toBytes("other"); - hmsg = new HMsg(HMsg.Type.STOP_REGIONSERVER, - new HRegionInfo(new HTableDescriptor(Bytes.toBytes("test")), other, other)); - assertEquals(-1, msgs.indexOf(hmsg)); - // Assert that two HMsgs are same if same content. - byte [] b = Bytes.toBytes(1); - hmsg = new HMsg(HMsg.Type.STOP_REGIONSERVER, - new HRegionInfo(new HTableDescriptor(Bytes.toBytes("test")), b, b)); - assertNotSame(-1, msgs.indexOf(hmsg)); - } - - public void testSerialization() throws IOException { - // Check out new HMsg that carries two daughter split regions. 
- byte [] abytes = Bytes.toBytes("a"); - byte [] bbytes = Bytes.toBytes("b"); - byte [] parentbytes = Bytes.toBytes("parent"); - HRegionInfo parent = - new HRegionInfo(new HTableDescriptor(Bytes.toBytes("parent")), - parentbytes, parentbytes); - // Assert simple HMsg serializes - HMsg hmsg = new HMsg(HMsg.Type.STOP_REGIONSERVER, parent); - byte [] bytes = Writables.getBytes(hmsg); - HMsg close = (HMsg)Writables.getWritable(bytes, new HMsg()); - assertTrue(close.equals(hmsg)); - // Assert split serializes - HRegionInfo daughtera = - new HRegionInfo(new HTableDescriptor(Bytes.toBytes("a")), abytes, abytes); - HRegionInfo daughterb = - new HRegionInfo(new HTableDescriptor(Bytes.toBytes("b")), bbytes, bbytes); - HMsg splithmsg = new HMsg(HMsg.Type.REGION_SPLIT, - parent, daughtera, daughterb, Bytes.toBytes("REGION_SPLIT")); - bytes = Writables.getBytes(splithmsg); - hmsg = (HMsg)Writables.getWritable(bytes, new HMsg()); - assertTrue(splithmsg.equals(hmsg)); - } -} diff --git a/src/test/java/org/apache/hadoop/hbase/TestHRegionLocation.java b/src/test/java/org/apache/hadoop/hbase/TestHRegionLocation.java new file mode 100644 index 0000000..82d5c0e --- /dev/null +++ b/src/test/java/org/apache/hadoop/hbase/TestHRegionLocation.java @@ -0,0 +1,65 @@ +/** + * Copyright 2011 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotSame; +import static org.junit.Assert.assertTrue; + +import org.junit.Test; + +public class TestHRegionLocation { + @Test + public void testHashAndEqualsCode() { + HServerAddress hsa1 = new HServerAddress("localhost", 1234); + HRegionLocation hrl1 = new HRegionLocation(HRegionInfo.FIRST_META_REGIONINFO, + hsa1); + HRegionLocation hrl2 = new HRegionLocation(HRegionInfo.FIRST_META_REGIONINFO, + hsa1); + assertEquals(hrl1.hashCode(), hrl2.hashCode()); + assertTrue(hrl1.equals(hrl2)); + HRegionLocation hrl3 = new HRegionLocation(HRegionInfo.ROOT_REGIONINFO, + hsa1); + assertNotSame(hrl1, hrl3); + assertFalse(hrl1.equals(hrl3)); + } + + @Test + public void testToString() { + HServerAddress hsa1 = new HServerAddress("localhost", 1234); + HRegionLocation hrl1 = new HRegionLocation(HRegionInfo.FIRST_META_REGIONINFO, + hsa1); + System.out.println(hrl1.toString()); + } + + @Test + public void testCompareTo() { + HServerAddress hsa1 = new HServerAddress("localhost", 1234); + HRegionLocation hsl1 = new HRegionLocation(HRegionInfo.ROOT_REGIONINFO, hsa1); + HServerAddress hsa2 = new HServerAddress("localhost", 1235); + HRegionLocation hsl2 = new HRegionLocation(HRegionInfo.ROOT_REGIONINFO, hsa2); + assertTrue(hsl1.compareTo(hsl1) == 0); + assertTrue(hsl2.compareTo(hsl2) == 0); + int compare1 = hsl1.compareTo(hsl2); + int compare2 = hsl2.compareTo(hsl1); + assertTrue((compare1 > 0)? 
compare2 < 0: compare2 > 0); + } +} \ No newline at end of file diff --git a/src/test/java/org/apache/hadoop/hbase/TestHServerAddress.java b/src/test/java/org/apache/hadoop/hbase/TestHServerAddress.java new file mode 100644 index 0000000..ed6e108 --- /dev/null +++ b/src/test/java/org/apache/hadoop/hbase/TestHServerAddress.java @@ -0,0 +1,89 @@ +/** + * Copyright 2011 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase; + +import static org.junit.Assert.*; + +import java.io.IOException; +import java.net.InetSocketAddress; + +import org.apache.hadoop.hbase.util.Writables; +import org.junit.Test; + +/** + * Tests for {@link HServerAddress} + */ +public class TestHServerAddress { + @Test + public void testHashCode() { + HServerAddress hsa1 = new HServerAddress("localhost", 1234); + HServerAddress hsa2 = new HServerAddress("localhost", 1234); + assertEquals(hsa1.hashCode(), hsa2.hashCode()); + HServerAddress hsa3 = new HServerAddress("localhost", 1235); + assertNotSame(hsa1.hashCode(), hsa3.hashCode()); + } + + @Test + public void testHServerAddress() { + new HServerAddress(); + } + + @Test + public void testHServerAddressInetSocketAddress() { + HServerAddress hsa1 = + new HServerAddress(new InetSocketAddress("localhost", 1234)); + System.out.println(hsa1.toString()); + } + + @Test + public void testHServerAddressString() { + HServerAddress hsa1 = new HServerAddress("localhost", 1234); + HServerAddress hsa2 = + new HServerAddress(new InetSocketAddress("localhost", 1234)); + assertTrue(hsa1.equals(hsa2)); + } + + @Test (expected = IllegalArgumentException.class) + public void testHServerAddressString2() { + // Make address with wrong delimiter + new HServerAddress("localhost", 1234); + } + + @Test + public void testHServerAddressHServerAddress() { + HServerAddress hsa1 = new HServerAddress("localhost", 1234); + HServerAddress hsa2 = new HServerAddress(hsa1); + assertEquals(hsa1, hsa2); + } + + @Test + public void testReadFields() throws IOException { + HServerAddress hsa1 = new HServerAddress("localhost", 1234); + HServerAddress hsa2 = new HServerAddress("localhost", 1235); + byte [] bytes = Writables.getBytes(hsa1); + HServerAddress deserialized = + (HServerAddress)Writables.getWritable(bytes, new HServerAddress()); + assertEquals(hsa1, deserialized); + bytes = Writables.getBytes(hsa2); + deserialized = + 
(HServerAddress)Writables.getWritable(bytes, new HServerAddress()); + assertNotSame(hsa1, deserialized); + } +} \ No newline at end of file diff --git a/src/test/java/org/apache/hadoop/hbase/TestHServerInfo.java b/src/test/java/org/apache/hadoop/hbase/TestHServerInfo.java new file mode 100644 index 0000000..689d41b --- /dev/null +++ b/src/test/java/org/apache/hadoop/hbase/TestHServerInfo.java @@ -0,0 +1,80 @@ +package org.apache.hadoop.hbase; + +import static org.junit.Assert.*; + +import java.io.IOException; + +import org.apache.hadoop.hbase.util.Writables; +import org.junit.Test; + +public class TestHServerInfo { + + @Test + public void testHashCodeAndEquals() { + HServerAddress hsa1 = new HServerAddress("localhost", 1234); + HServerInfo hsi1 = new HServerInfo(hsa1, 1L, 5678); + HServerInfo hsi2 = new HServerInfo(hsa1, 1L, 5678); + HServerInfo hsi3 = new HServerInfo(hsa1, 2L, 5678); + HServerInfo hsi4 = new HServerInfo(hsa1, 1L, 5677); + HServerAddress hsa2 = new HServerAddress("localhost", 1235); + HServerInfo hsi5 = new HServerInfo(hsa2, 1L, 5678); + assertEquals(hsi1.hashCode(), hsi2.hashCode()); + assertTrue(hsi1.equals(hsi2)); + assertNotSame(hsi1.hashCode(), hsi3.hashCode()); + assertFalse(hsi1.equals(hsi3)); + assertNotSame(hsi1.hashCode(), hsi4.hashCode()); + assertFalse(hsi1.equals(hsi4)); + assertNotSame(hsi1.hashCode(), hsi5.hashCode()); + assertFalse(hsi1.equals(hsi5)); + } + + @Test + public void testHServerInfoHServerInfo() { + HServerAddress hsa1 = new HServerAddress("localhost", 1234); + HServerInfo hsi1 = new HServerInfo(hsa1, 1L, 5678); + HServerInfo hsi2 = new HServerInfo(hsi1); + assertEquals(hsi1, hsi2); + } + + @Test + public void testGetServerAddress() { + HServerAddress hsa1 = new HServerAddress("localhost", 1234); + HServerInfo hsi1 = new HServerInfo(hsa1, 1L, 5678); + assertEquals(hsi1.getServerAddress(), hsa1); + } + + @Test + public void testToString() { + HServerAddress hsa1 = new HServerAddress("localhost", 1234); + HServerInfo hsi1 
= new HServerInfo(hsa1, 1L, 5678); + System.out.println(hsi1.toString()); + } + + @Test + public void testReadFields() throws IOException { + HServerAddress hsa1 = new HServerAddress("localhost", 1234); + HServerInfo hsi1 = new HServerInfo(hsa1, 1L, 5678); + HServerAddress hsa2 = new HServerAddress("localhost", 1235); + HServerInfo hsi2 = new HServerInfo(hsa2, 1L, 5678); + byte [] bytes = Writables.getBytes(hsi1); + HServerInfo deserialized = + (HServerInfo)Writables.getWritable(bytes, new HServerInfo()); + assertEquals(hsi1, deserialized); + bytes = Writables.getBytes(hsi2); + deserialized = (HServerInfo)Writables.getWritable(bytes, new HServerInfo()); + assertNotSame(hsa1, deserialized); + } + + @Test + public void testCompareTo() { + HServerAddress hsa1 = new HServerAddress("localhost", 1234); + HServerInfo hsi1 = new HServerInfo(hsa1, 1L, 5678); + HServerAddress hsa2 = new HServerAddress("localhost", 1235); + HServerInfo hsi2 = new HServerInfo(hsa2, 1L, 5678); + assertTrue(hsi1.compareTo(hsi1) == 0); + assertTrue(hsi2.compareTo(hsi2) == 0); + int compare1 = hsi1.compareTo(hsi2); + int compare2 = hsi2.compareTo(hsi1); + assertTrue((compare1 > 0)? 
compare2 < 0: compare2 > 0); + } +} \ No newline at end of file diff --git a/src/test/java/org/apache/hadoop/hbase/TestRegionRebalancing.java b/src/test/java/org/apache/hadoop/hbase/TestRegionRebalancing.java index 7c97d94..7f22492 100644 --- a/src/test/java/org/apache/hadoop/hbase/TestRegionRebalancing.java +++ b/src/test/java/org/apache/hadoop/hbase/TestRegionRebalancing.java @@ -169,7 +169,7 @@ public class TestRegionRebalancing extends HBaseClusterTestCase { int regionCount = getRegionCount(); List servers = getOnlineRegionServers(); - double avg = cluster.getMaster().getServerManager().getAverageLoad(); + double avg = cluster.getMaster().getAverageLoad(); int avgLoadPlusSlop = (int)Math.ceil(avg * (1 + slop)); int avgLoadMinusSlop = (int)Math.floor(avg * (1 - slop)) - 1; LOG.debug("There are " + servers.size() + " servers and " + regionCount diff --git a/src/test/java/org/apache/hadoop/hbase/TestSerialization.java b/src/test/java/org/apache/hadoop/hbase/TestSerialization.java index befcdaf..56f1c6e 100644 --- a/src/test/java/org/apache/hadoop/hbase/TestSerialization.java +++ b/src/test/java/org/apache/hadoop/hbase/TestSerialization.java @@ -84,21 +84,6 @@ public class TestSerialization { assertTrue(Bytes.equals("value".getBytes(), hmw.get("key".getBytes()))); } - @Test public void testHMsg() throws Exception { - final String name = "testHMsg"; - HMsg m = new HMsg(HMsg.Type.STOP_REGIONSERVER); - byte [] mb = Writables.getBytes(m); - HMsg deserializedHMsg = (HMsg)Writables.getWritable(mb, new HMsg()); - assertTrue(m.equals(deserializedHMsg)); - m = new HMsg(HMsg.Type.STOP_REGIONSERVER, - new HRegionInfo(new HTableDescriptor(name), - HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY), - "Some message".getBytes()); - mb = Writables.getBytes(m); - deserializedHMsg = (HMsg)Writables.getWritable(mb, new HMsg()); - assertTrue(m.equals(deserializedHMsg)); - } - @Test public void testTableDescriptor() throws Exception { final String name = "testTableDescriptor"; 
HTableDescriptor htd = createTableDescriptor(name); @@ -129,19 +114,6 @@ public class TestSerialization { deserializedHri.getTableDesc().getFamilies().size()); } - /** - * Test ServerInfo serialization - * @throws Exception - */ - @Test public void testServerInfo() throws Exception { - HServerInfo hsi = new HServerInfo(new HServerAddress("0.0.0.0:123"), -1, - 1245, "default name"); - byte [] b = Writables.getBytes(hsi); - HServerInfo deserializedHsi = - (HServerInfo)Writables.getWritable(b, new HServerInfo()); - assertTrue(hsi.equals(deserializedHsi)); - } - @Test public void testPut() throws Exception{ byte[] row = "row".getBytes(); byte[] fam = "fam".getBytes(); diff --git a/src/test/java/org/apache/hadoop/hbase/TestServerName.java b/src/test/java/org/apache/hadoop/hbase/TestServerName.java new file mode 100644 index 0000000..ba94c10 --- /dev/null +++ b/src/test/java/org/apache/hadoop/hbase/TestServerName.java @@ -0,0 +1,62 @@ +/** + * Copyright 2011 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase; + +import static org.junit.Assert.*; + +import java.util.HashSet; +import java.util.Set; + +import org.junit.Test; + +public class TestServerName { + @Test + public void testServerName() { + ServerName sn = new ServerName("www.example.org", 1234, 5678); + assertEquals(sn.toString(), + ServerName.getServerName("www.example.org", 1234, 5678)); + assertEquals(sn.toString(), + ServerName.getServerName("www.example.org:1234", 5678)); + assertEquals(sn.toString(), + "www.example.org" + ServerName.SERVERNAME_SEPARATOR + + "1234" + ServerName.SERVERNAME_SEPARATOR + "5678"); + } + + @Test + public void getServerStartcodeFromServerName() { + ServerName sn = new ServerName("www.example.org", 1234, 5678); + assertEquals(5678, + ServerName.getServerStartcodeFromServerName(sn.toString())); + assertNotSame(5677, + ServerName.getServerStartcodeFromServerName(sn.toString())); + } + + @Test + public void testIsServer() { + ServerName sn = new ServerName("www.example.org", 1234, 5678); + ServerName sn80 = new ServerName("www.example.org", 80, 5678); + Set servers = new HashSet(); + servers.add(sn.toString()); + assertTrue(ServerName.isServer(servers, sn.toString(), false)); + assertFalse(ServerName.isServer(servers, sn80.toString(), false)); + assertTrue(ServerName.isServer(servers, "www.example.org:1234", true)); + assertFalse(ServerName.isServer(servers, "www.example.org:80", true)); + } +} \ No newline at end of file diff --git a/src/test/java/org/apache/hadoop/hbase/catalog/TestCatalogTracker.java b/src/test/java/org/apache/hadoop/hbase/catalog/TestCatalogTracker.java index e25184e..0a3c534 100644 --- a/src/test/java/org/apache/hadoop/hbase/catalog/TestCatalogTracker.java +++ b/src/test/java/org/apache/hadoop/hbase/catalog/TestCatalogTracker.java @@ -36,9 +36,9 @@ import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; import 
org.apache.hadoop.hbase.HServerAddress; -import org.apache.hadoop.hbase.HServerInfo; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.NotAllMetaRegionsOnlineException; +import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.HConnection; import org.apache.hadoop.hbase.client.Result; @@ -63,8 +63,8 @@ import org.mockito.Mockito; public class TestCatalogTracker { private static final Log LOG = LogFactory.getLog(TestCatalogTracker.class); private static final HBaseTestingUtility UTIL = new HBaseTestingUtility(); - private static final HServerAddress HSA = - new HServerAddress("example.org:1234"); + private static final ServerName HSA = + new ServerName("example.org", 1234, System.currentTimeMillis()); private ZooKeeperWatcher watcher; private Abortable abortable; @@ -115,7 +115,7 @@ public class TestCatalogTracker { final CatalogTracker ct = constructAndStartCatalogTracker(connection); try { RootLocationEditor.setRootLocation(this.watcher, - new HServerAddress("example.com:1234")); + new ServerName("example.com", 1234, System.currentTimeMillis())); } finally { // Clean out root location or later tests will be confused... they presume // start fresh in zk. 
@@ -131,9 +131,9 @@ public class TestCatalogTracker { @Test public void testInterruptWaitOnMetaAndRoot() throws IOException, InterruptedException { final CatalogTracker ct = constructAndStartCatalogTracker(); - HServerAddress hsa = ct.getRootLocation(); + ServerName hsa = ct.getRootLocation(); Assert.assertNull(hsa); - HServerAddress meta = ct.getMetaLocation(); + ServerName meta = ct.getMetaLocation(); Assert.assertNull(meta); Thread t = new Thread() { @Override @@ -169,7 +169,7 @@ public class TestCatalogTracker { final CatalogTracker ct = constructAndStartCatalogTracker(connection); try { RootLocationEditor.setRootLocation(this.watcher, - new HServerAddress("example.com:1234")); + new ServerName("example.com", 1234, System.currentTimeMillis())); Assert.assertFalse(ct.verifyMetaRegionLocation(100)); } finally { // Clean out root location or later tests will be confused... they presume @@ -200,7 +200,7 @@ public class TestCatalogTracker { final CatalogTracker ct = constructAndStartCatalogTracker(connection); try { RootLocationEditor.setRootLocation(this.watcher, - new HServerAddress("example.com:1234")); + new ServerName("example.com", 1234, System.currentTimeMillis())); Assert.assertFalse(ct.verifyRootRegionLocation(100)); } finally { // Clean out root location or later tests will be confused... they presume @@ -232,7 +232,7 @@ public class TestCatalogTracker { @Test public void testNoTimeoutWaitForRoot() throws IOException, InterruptedException, KeeperException { final CatalogTracker ct = constructAndStartCatalogTracker(); - HServerAddress hsa = ct.getRootLocation(); + ServerName hsa = ct.getRootLocation(); Assert.assertNull(hsa); // Now test waiting on root location getting set. 
@@ -246,7 +246,7 @@ public class TestCatalogTracker { Assert.assertTrue(ct.getRootLocation().equals(hsa)); } - private HServerAddress setRootLocation() throws KeeperException { + private ServerName setRootLocation() throws KeeperException { RootLocationEditor.setRootLocation(this.watcher, HSA); return HSA; } @@ -270,7 +270,7 @@ public class TestCatalogTracker { thenReturn(mockHRI); final CatalogTracker ct = constructAndStartCatalogTracker(connection); - HServerAddress hsa = ct.getMetaLocation(); + ServerName hsa = ct.getMetaLocation(); Assert.assertNull(hsa); // Now test waiting on meta location getting set. @@ -300,8 +300,7 @@ public class TestCatalogTracker { // been assigned. String node = ct.getMetaNodeTracker().getNode(); ZKUtil.createAndFailSilent(this.watcher, node); - MetaEditor.updateMetaLocation(ct, HRegionInfo.FIRST_META_REGIONINFO, - new HServerInfo(HSA, -1, "example.com")); + MetaEditor.updateMetaLocation(ct, HRegionInfo.FIRST_META_REGIONINFO, HSA); ZKUtil.deleteNode(this.watcher, node); // Join the thread... should exit shortly. 
t.join(); diff --git a/src/test/java/org/apache/hadoop/hbase/catalog/TestCatalogTrackerOnCluster.java b/src/test/java/org/apache/hadoop/hbase/catalog/TestCatalogTrackerOnCluster.java index 66c6959..614c906 100644 --- a/src/test/java/org/apache/hadoop/hbase/catalog/TestCatalogTrackerOnCluster.java +++ b/src/test/java/org/apache/hadoop/hbase/catalog/TestCatalogTrackerOnCluster.java @@ -23,7 +23,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.HBaseTestingUtility; -import org.apache.hadoop.hbase.HServerAddress; +import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; import org.junit.Test; @@ -53,7 +53,8 @@ public class TestCatalogTrackerOnCluster { LOG.error("Abort was called on 'bad root location writer'", e); } }); - HServerAddress nonsense = new HServerAddress("example.org:1234"); + ServerName nonsense = + new ServerName("example.org", 1234, System.currentTimeMillis()); RootLocationEditor.setRootLocation(zookeeper, nonsense); // Bring back up the hbase cluster. See if it can deal with nonsense root // location. 
diff --git a/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaReaderEditor.java b/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaReaderEditor.java index 43a8171..3d5c611 100644 --- a/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaReaderEditor.java +++ b/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaReaderEditor.java @@ -32,7 +32,7 @@ import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.HServerAddress; +import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.client.HConnection; import org.apache.hadoop.hbase.client.HConnectionManager; @@ -116,7 +116,7 @@ public class TestMetaReaderEditor { // Test it works getting a region from user table. List regions = MetaReader.getTableRegions(ct, nameBytes); assertEquals(regionCount, regions.size()); - Pair pair = + Pair pair = MetaReader.getRegion(ct, regions.get(0).getRegionName()); assertEquals(regions.get(0).getEncodedName(), pair.getFirst().getEncodedName()); diff --git a/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java b/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java index e72924e..e640fed 100644 --- a/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java +++ b/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java @@ -20,15 +20,24 @@ package org.apache.hadoop.hbase.coprocessor; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; + +import java.io.IOException; +import java.util.Collection; +import java.util.List; +import java.util.Map; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseTestingUtility; import 
org.apache.hadoop.hbase.HColumnDescriptor; -import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HServerAddress; -import org.apache.hadoop.hbase.HServerInfo; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.MiniHBaseCluster; +import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.UnknownRegionException; import org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.client.HTable; @@ -41,13 +50,6 @@ import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.Test; -import java.io.IOException; -import java.util.Collection; -import java.util.List; -import java.util.Map; - -import static org.junit.Assert.*; - /** * Tests invocation of the {@link org.apache.hadoop.hbase.coprocessor.MasterObserver} * interface hooks at all appropriate times during normal HMaster operations. @@ -217,14 +219,14 @@ public class TestMasterObserver { @Override public void preMove(MasterCoprocessorEnvironment env, - HRegionInfo region, HServerInfo srcServer, HServerInfo destServer) + HRegionInfo region, ServerName srcServer, ServerName destServer) throws UnknownRegionException { preMoveCalled = true; } @Override public void postMove(MasterCoprocessorEnvironment env, HRegionInfo region, - HServerInfo srcServer, HServerInfo destServer) + ServerName srcServer, ServerName destServer) throws UnknownRegionException { postMoveCalled = true; } @@ -445,15 +447,17 @@ public class TestMasterObserver { Map regions = table.getRegionsInfo(); assertFalse(regions.isEmpty()); - Map.Entry firstRegion = + Map.Entry firstRegion = regions.entrySet().iterator().next(); // try to force a move - Collection servers = master.getClusterStatus().getServerInfo(); + Collection servers = master.getClusterStatus().getServers(); String destName = null; - for (HServerInfo info : servers) { - if (!info.getServerAddress().equals(firstRegion.getValue())) { - destName = 
info.getServerName(); + for (ServerName info : servers) { + HServerAddress hsa = + new HServerAddress(info.getHostname(), info.getPort()); + if (!hsa.equals(firstRegion.getValue())) { + destName = info.toString(); break; } } @@ -471,7 +475,7 @@ public class TestMasterObserver { master.balanceSwitch(false); // move half the open regions from RS 0 to RS 1 HRegionServer rs = cluster.getRegionServer(0); - byte[] destRS = Bytes.toBytes(cluster.getRegionServer(1).getServerName()); + byte[] destRS = Bytes.toBytes(cluster.getRegionServer(1).getServerName().toString()); List openRegions = rs.getOnlineRegions(); int moveCnt = openRegions.size()/2; for (int i=0; i${HBASE_HOME}/bin/hbase ./bin/hbase org.apache.hadoop.hbase.OOMEHMaster start/code>. - */ -public class OOMEHMaster extends HMaster { - private List retainer = new ArrayList(); - - public OOMEHMaster(HBaseConfiguration conf) - throws IOException, KeeperException, InterruptedException { - super(conf); - } - - @Override - public HMsg[] regionServerReport(HServerInfo serverInfo, HMsg[] msgs, - HRegionInfo[] mostLoadedRegions) - throws IOException { - // Retain 1M. 
- this.retainer.add(new byte [1024 * 1024]); - return super.regionServerReport(serverInfo, msgs, mostLoadedRegions); - } - - public static void main(String[] args) throws Exception { - new HMasterCommandLine(OOMEHMaster.class).doMain(args); - } -} diff --git a/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java b/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java index 1a19941..75397f7 100644 --- a/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java +++ b/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java @@ -30,9 +30,10 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseTestingUtility; -import org.apache.hadoop.hbase.HServerAddress; import org.apache.hadoop.hbase.Server; +import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.catalog.CatalogTracker; +import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.zookeeper.ZKUtil; import org.apache.hadoop.hbase.zookeeper.ZooKeeperListener; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; @@ -67,7 +68,7 @@ public class TestActiveMasterManager { } catch(KeeperException.NoNodeException nne) {} // Create the master node with a dummy address - HServerAddress master = new HServerAddress("localhost", 1); + ServerName master = new ServerName("localhost", 1, System.currentTimeMillis()); // Should not have a master yet DummyMaster dummyMaster = new DummyMaster(); ActiveMasterManager activeMasterManager = new ActiveMasterManager(zk, @@ -106,8 +107,10 @@ public class TestActiveMasterManager { } catch(KeeperException.NoNodeException nne) {} // Create the master node with a dummy address - HServerAddress firstMasterAddress = new HServerAddress("localhost", 1); - HServerAddress secondMasterAddress = new HServerAddress("localhost", 2); + ServerName firstMasterAddress = + new 
ServerName("localhost", 1, System.currentTimeMillis()); + ServerName secondMasterAddress = + new ServerName("localhost", 2, System.currentTimeMillis()); // Should not have a master yet DummyMaster ms1 = new DummyMaster(); @@ -177,8 +180,10 @@ public class TestActiveMasterManager { * @throws KeeperException */ private void assertMaster(ZooKeeperWatcher zk, - HServerAddress expectedAddress) throws KeeperException { - HServerAddress readAddress = ZKUtil.getDataAsAddress(zk, zk.masterAddressZNode); + ServerName expectedAddress) + throws KeeperException { + ServerName readAddress = + new ServerName(Bytes.toString(ZKUtil.getData(zk, zk.masterAddressZNode))); assertNotNull(readAddress); assertTrue(expectedAddress.equals(readAddress)); } @@ -188,8 +193,7 @@ public class TestActiveMasterManager { ActiveMasterManager manager; boolean isActiveMaster; - public WaitToBeMasterThread(ZooKeeperWatcher zk, - HServerAddress address) { + public WaitToBeMasterThread(ZooKeeperWatcher zk, ServerName address) { this.manager = new ActiveMasterManager(zk, address, new DummyMaster()); isActiveMaster = false; @@ -248,7 +252,7 @@ public class TestActiveMasterManager { } @Override - public String getServerName() { + public ServerName getServerName() { return null; } diff --git a/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java b/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java index 3f332ff..9652420 100644 --- a/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java +++ b/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java @@ -39,6 +39,7 @@ import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.NotAllMetaRegionsOnlineException; import org.apache.hadoop.hbase.Server; +import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.catalog.CatalogTracker; import org.apache.hadoop.hbase.client.Result; import 
org.apache.hadoop.hbase.executor.ExecutorService; @@ -84,7 +85,7 @@ public class TestCatalogJanitor { } @Override - public String getServerName() { + public ServerName getServerName() { // TODO Auto-generated method stub return null; } diff --git a/src/test/java/org/apache/hadoop/hbase/master/TestClockSkewDetection.java b/src/test/java/org/apache/hadoop/hbase/master/TestClockSkewDetection.java index 915cdf6..a034c11 100644 --- a/src/test/java/org/apache/hadoop/hbase/master/TestClockSkewDetection.java +++ b/src/test/java/org/apache/hadoop/hbase/master/TestClockSkewDetection.java @@ -19,6 +19,8 @@ */ package org.apache.hadoop.hbase.master; +import java.net.InetSocketAddress; + import junit.framework.Assert; import org.apache.commons.logging.Log; @@ -26,9 +28,8 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ClockOutOfSyncException; import org.apache.hadoop.hbase.HBaseConfiguration; -import org.apache.hadoop.hbase.HServerAddress; -import org.apache.hadoop.hbase.HServerInfo; import org.apache.hadoop.hbase.Server; +import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.catalog.CatalogTracker; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; import org.junit.Test; @@ -53,7 +54,7 @@ public class TestClockSkewDetection { } @Override - public String getServerName() { + public ServerName getServerName() { return null; } @@ -72,22 +73,20 @@ public class TestClockSkewDetection { @Override public void stop(String why) { - }}, null, null); + }}, null); LOG.debug("regionServerStartup 1"); - HServerInfo hsi1 = new HServerInfo(new HServerAddress("example.org:1234"), - System.currentTimeMillis(), -1, "example.com"); - sm.regionServerStartup(hsi1, System.currentTimeMillis()); + InetSocketAddress isa1 = new InetSocketAddress("example.org", 1234); + sm.regionServerStartup(isa1, -1, System.currentTimeMillis()); long maxSkew = 30000; try { LOG.debug("regionServerStartup 2"); - 
HServerInfo hsi2 = new HServerInfo(new HServerAddress("example.org:1235"), - System.currentTimeMillis(), -1, "example.com"); - sm.regionServerStartup(hsi2, System.currentTimeMillis() - maxSkew * 2); + InetSocketAddress isa2 = new InetSocketAddress("example.org", 1235); + sm.regionServerStartup(isa2, -1, System.currentTimeMillis() - maxSkew * 2); Assert.assertTrue("HMaster should have thrown an ClockOutOfSyncException " - + "but didn't.", false); + + "but didn't.", false); } catch(ClockOutOfSyncException e) { //we want an exception LOG.info("Recieved expected exception: "+e); diff --git a/src/test/java/org/apache/hadoop/hbase/master/TestHMasterRPCException.java b/src/test/java/org/apache/hadoop/hbase/master/TestHMasterRPCException.java index a1bb6af..5cb7c25 100644 --- a/src/test/java/org/apache/hadoop/hbase/master/TestHMasterRPCException.java +++ b/src/test/java/org/apache/hadoop/hbase/master/TestHMasterRPCException.java @@ -23,10 +23,12 @@ package org.apache.hadoop.hbase.master; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; +import java.net.InetSocketAddress; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.HServerAddress; +import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.ipc.HBaseRPC; import org.apache.hadoop.hbase.ipc.HMasterInterface; import org.apache.hadoop.ipc.RemoteException; @@ -43,12 +45,11 @@ public class TestHMasterRPCException { HMaster hm = new HMaster(conf); - HServerAddress hma = hm.getMasterAddress(); + ServerName sm = hm.getServerName(); + InetSocketAddress isa = new InetSocketAddress(sm.getHostname(), sm.getPort()); try { - HMasterInterface inf = - (HMasterInterface) HBaseRPC.getProxy( - HMasterInterface.class, HMasterInterface.VERSION, - hma.getInetSocketAddress(), conf, 100); + HMasterInterface inf = (HMasterInterface) HBaseRPC.getProxy( + HMasterInterface.class, 
HMasterInterface.VERSION, isa, conf, 100); inf.isMasterRunning(); fail(); } catch (RemoteException ex) { diff --git a/src/test/java/org/apache/hadoop/hbase/master/TestLoadBalancer.java b/src/test/java/org/apache/hadoop/hbase/master/TestLoadBalancer.java index ca2a4bc..a09a1a0 100644 --- a/src/test/java/org/apache/hadoop/hbase/master/TestLoadBalancer.java +++ b/src/test/java/org/apache/hadoop/hbase/master/TestLoadBalancer.java @@ -23,7 +23,7 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; import java.util.ArrayList; -import java.util.Arrays; +import java.util.HashMap; import java.util.LinkedList; import java.util.List; import java.util.Map; @@ -37,9 +37,8 @@ import java.util.TreeSet; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.HServerAddress; -import org.apache.hadoop.hbase.HServerInfo; import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.master.LoadBalancer.RegionPlan; import org.apache.hadoop.hbase.util.Bytes; import org.junit.BeforeClass; @@ -146,13 +145,14 @@ public class TestLoadBalancer { public void testBalanceCluster() throws Exception { for(int [] mockCluster : clusterStateMocks) { - Map> servers = mockClusterServers(mockCluster); - LOG.info("Mock Cluster : " + printMock(servers) + " " + printStats(servers)); + Map> servers = mockClusterServers(mockCluster); + List list = convertToList(servers); + LOG.info("Mock Cluster : " + printMock(list) + " " + printStats(list)); List plans = loadBalancer.balanceCluster(servers); - List balancedCluster = reconcile(servers, plans); + List balancedCluster = reconcile(list, plans); LOG.info("Mock Balance : " + printMock(balancedCluster)); assertClusterAsBalanced(balancedCluster); - for(Map.Entry> entry : servers.entrySet()) { + for(Map.Entry> entry : servers.entrySet()) { 
returnRegions(entry.getValue()); returnServer(entry.getKey()); } @@ -164,13 +164,13 @@ public class TestLoadBalancer { * Invariant is that all servers have between floor(avg) and ceiling(avg) * number of regions. */ - public void assertClusterAsBalanced(List servers) { + public void assertClusterAsBalanced(List servers) { int numServers = servers.size(); int numRegions = 0; int maxRegions = 0; int minRegions = Integer.MAX_VALUE; - for(HServerInfo server : servers) { - int nr = server.getLoad().getNumberOfRegions(); + for(LoadBalancer.ServerAndLoad server : servers) { + int nr = server.getLoad(); if(nr > maxRegions) { maxRegions = nr; } @@ -186,9 +186,9 @@ public class TestLoadBalancer { int min = numRegions / numServers; int max = numRegions % numServers == 0 ? min : min + 1; - for(HServerInfo server : servers) { - assertTrue(server.getLoad().getNumberOfRegions() <= max); - assertTrue(server.getLoad().getNumberOfRegions() >= min); + for(LoadBalancer.ServerAndLoad server : servers) { + assertTrue(server.getLoad() <= max); + assertTrue(server.getLoad() >= min); } } @@ -204,12 +204,13 @@ public class TestLoadBalancer { for(int [] mock : regionsAndServersMocks) { LOG.debug("testImmediateAssignment with " + mock[0] + " regions and " + mock[1] + " servers"); List regions = randomRegions(mock[0]); - List servers = randomServers(mock[1], 0); - Map assignments = - LoadBalancer.immediateAssignment(regions, servers); - assertImmediateAssignment(regions, servers, assignments); + List servers = randomServers(mock[1], 0); + List list = getListOfServerNames(servers); + Map assignments = + LoadBalancer.immediateAssignment(regions, list); + assertImmediateAssignment(regions, list, assignments); returnRegions(regions); - returnServers(servers); + returnServers(list); } } @@ -220,7 +221,7 @@ public class TestLoadBalancer { * @param assignments */ private void assertImmediateAssignment(List regions, - List servers, Map assignments) { + List servers, Map assignments) { for(HRegionInfo 
region : regions) { assertTrue(assignments.containsKey(region)); } @@ -239,9 +240,10 @@ public class TestLoadBalancer { for(int [] mock : regionsAndServersMocks) { LOG.debug("testBulkAssignment with " + mock[0] + " regions and " + mock[1] + " servers"); List regions = randomRegions(mock[0]); - List servers = randomServers(mock[1], 0); - Map> assignments = - LoadBalancer.roundRobinAssignment(regions, servers); + List servers = randomServers(mock[1], 0); + List list = getListOfServerNames(servers); + Map> assignments = + LoadBalancer.roundRobinAssignment(regions, list); float average = (float)regions.size()/servers.size(); int min = (int)Math.floor(average); int max = (int)Math.ceil(average); @@ -251,7 +253,7 @@ public class TestLoadBalancer { } } returnRegions(regions); - returnServers(servers); + returnServers(list); } } @@ -263,31 +265,43 @@ public class TestLoadBalancer { @Test public void testRetainAssignment() throws Exception { // Test simple case where all same servers are there - List servers = randomServers(10, 10); + List servers = randomServers(10, 10); List regions = randomRegions(100); - Map existing = - new TreeMap(); - for (int i=0;i existing = + new TreeMap(); + for (int i = 0; i < regions.size(); i++) { + existing.put(regions.get(i), servers.get(i % servers.size()).getServerName()); } - Map> assignment = - LoadBalancer.retainAssignment(existing, servers); - assertRetainedAssignment(existing, servers, assignment); + List listOfServerNames = getListOfServerNames(servers); + Map> assignment = + LoadBalancer.retainAssignment(existing, listOfServerNames); + assertRetainedAssignment(existing, listOfServerNames, assignment); // Include two new servers that were not there before - List servers2 = new ArrayList(servers); + List servers2 = + new ArrayList(servers); servers2.add(randomServer(10)); servers2.add(randomServer(10)); - assignment = LoadBalancer.retainAssignment(existing, servers2); - assertRetainedAssignment(existing, servers2, assignment); + 
listOfServerNames = getListOfServerNames(servers2); + assignment = LoadBalancer.retainAssignment(existing, listOfServerNames); + assertRetainedAssignment(existing, listOfServerNames, assignment); // Remove two of the servers that were previously there - List servers3 = new ArrayList(servers); + List servers3 = + new ArrayList(servers); servers3.remove(servers3.size()-1); servers3.remove(servers3.size()-2); - assignment = LoadBalancer.retainAssignment(existing, servers3); - assertRetainedAssignment(existing, servers3, assignment); + listOfServerNames = getListOfServerNames(servers2); + assignment = LoadBalancer.retainAssignment(existing, listOfServerNames); + assertRetainedAssignment(existing, listOfServerNames, assignment); + } + + private List getListOfServerNames(final List sals) { + List list = new ArrayList(); + for (LoadBalancer.ServerAndLoad e: sals) { + list.add(e.getServerName()); + } + return list; } /** @@ -304,12 +318,12 @@ public class TestLoadBalancer { * @param assignment */ private void assertRetainedAssignment( - Map existing, List servers, - Map> assignment) { + Map existing, List servers, + Map> assignment) { // Verify condition 1, every region assigned, and to online server - Set onlineServerSet = new TreeSet(servers); + Set onlineServerSet = new TreeSet(servers); Set assignedRegions = new TreeSet(); - for (Map.Entry> a : assignment.entrySet()) { + for (Map.Entry> a : assignment.entrySet()) { assertTrue("Region assigned to server that was not listed as online", onlineServerSet.contains(a.getKey())); for (HRegionInfo r : a.getValue()) assignedRegions.add(r); @@ -317,23 +331,23 @@ public class TestLoadBalancer { assertEquals(existing.size(), assignedRegions.size()); // Verify condition 2, if server had existing assignment, must have same - Set onlineAddresses = new TreeSet(); - for (HServerInfo s : servers) onlineAddresses.add(s.getServerAddress()); - for (Map.Entry> a : assignment.entrySet()) { + Set onlineAddresses = new TreeSet(); + for 
(ServerName s : servers) onlineAddresses.add(s); + for (Map.Entry> a : assignment.entrySet()) { for (HRegionInfo r : a.getValue()) { - HServerAddress address = existing.get(r); + ServerName address = existing.get(r); if (address != null && onlineAddresses.contains(address)) { - assertTrue(a.getKey().getServerAddress().equals(address)); + assertTrue(a.getKey().equals(address)); } } } } - private String printStats(Map> servers) { + private String printStats(List servers) { int numServers = servers.size(); int totalRegions = 0; - for(HServerInfo server : servers.keySet()) { - totalRegions += server.getLoad().getNumberOfRegions(); + for(LoadBalancer.ServerAndLoad server : servers) { + totalRegions += server.getLoad(); } float average = (float)totalRegions / numServers; int max = (int)Math.ceil(average); @@ -341,20 +355,31 @@ public class TestLoadBalancer { return "[srvr=" + numServers + " rgns=" + totalRegions + " avg=" + average + " max=" + max + " min=" + min + "]"; } - private String printMock(Map> servers) { - return printMock(Arrays.asList(servers.keySet().toArray(new HServerInfo[servers.size()]))); + private List convertToList(final Map> servers) { + List list = + new ArrayList(servers.size()); + for (Map.Entry> e: servers.entrySet()) { + list.add(new LoadBalancer.ServerAndLoad(e.getKey(), e.getValue().size())); + } + return list; } - private String printMock(List balancedCluster) { - SortedSet sorted = new TreeSet(balancedCluster); - HServerInfo [] arr = sorted.toArray(new HServerInfo[sorted.size()]); + private String printMock(Map> servers) { + return printMock(convertToList(servers)); + } + + private String printMock(List balancedCluster) { + SortedSet sorted = + new TreeSet(balancedCluster); + LoadBalancer.ServerAndLoad [] arr = + sorted.toArray(new LoadBalancer.ServerAndLoad[sorted.size()]); StringBuilder sb = new StringBuilder(sorted.size() * 4 + 4); sb.append("{ "); - for(int i=0;i reconcile( - Map> servers, List plans) { - if(plans != null) { - 
for(RegionPlan plan : plans) { - plan.getSource().getLoad().setNumberOfRegions( - plan.getSource().getLoad().getNumberOfRegions() - 1); - plan.getDestination().getLoad().setNumberOfRegions( - plan.getDestination().getLoad().getNumberOfRegions() + 1); - } + private List reconcile(List list, + List plans) { + List result = + new ArrayList(list.size()); + if (plans == null) return result; + Map map = + new HashMap(list.size()); + for (RegionPlan plan : plans) { + ServerName source = plan.getSource(); + updateLoad(map, source, -1); + ServerName destination = plan.getDestination(); + updateLoad(map, destination, +1); } - return Arrays.asList(servers.keySet().toArray(new HServerInfo[servers.size()])); + result.clear(); + result.addAll(map.values()); + return result; + } + + private void updateLoad(Map map, + final ServerName sn, final int diff) { + LoadBalancer.ServerAndLoad sal = map.get(sn); + if (sal == null) return; + sal = new LoadBalancer.ServerAndLoad(sn, sal.getLoad() + diff); + map.put(sn, sal); } - private Map> mockClusterServers( + private Map> mockClusterServers( int [] mockCluster) { int numServers = mockCluster.length; - Map> servers = - new TreeMap>(); - for(int i=0;i> servers = + new TreeMap>(); + for(int i = 0; i < numServers; i++) { int numRegions = mockCluster[i]; - HServerInfo server = randomServer(numRegions); + LoadBalancer.ServerAndLoad sal = randomServer(0); List regions = randomRegions(numRegions); - servers.put(server, regions); + servers.put(sal.getServerName(), regions); } return servers; } @@ -420,36 +458,34 @@ public class TestLoadBalancer { regionQueue.addAll(regions); } - private Queue serverQueue = new LinkedList(); + private Queue serverQueue = new LinkedList(); - private HServerInfo randomServer(int numRegions) { - if(!serverQueue.isEmpty()) { - HServerInfo server = this.serverQueue.poll(); - server.getLoad().setNumberOfRegions(numRegions); - return server; + private LoadBalancer.ServerAndLoad randomServer(final int numRegionsPerServer) 
{ + if (!this.serverQueue.isEmpty()) { + ServerName sn = this.serverQueue.poll(); + return new LoadBalancer.ServerAndLoad(sn, numRegionsPerServer); } String host = "127.0.0.1"; int port = rand.nextInt(60000); long startCode = rand.nextLong(); - HServerInfo hsi = - new HServerInfo(new HServerAddress(host, port), startCode, port, host); - hsi.getLoad().setNumberOfRegions(numRegions); - return hsi; + ServerName sn = new ServerName(host, port, startCode); + return new LoadBalancer.ServerAndLoad(sn, numRegionsPerServer); } - private List randomServers(int numServers, int numRegionsPerServer) { - List servers = new ArrayList(numServers); - for(int i=0;i randomServers(int numServers, int numRegionsPerServer) { + List servers = + new ArrayList(numServers); + for (int i = 0; i < numServers; i++) { servers.add(randomServer(numRegionsPerServer)); } return servers; } - private void returnServer(HServerInfo server) { + private void returnServer(ServerName server) { serverQueue.add(server); } - private void returnServers(List servers) { - serverQueue.addAll(servers); + private void returnServers(List servers) { + this.serverQueue.addAll(servers); } -} +} \ No newline at end of file diff --git a/src/test/java/org/apache/hadoop/hbase/master/TestLogsCleaner.java b/src/test/java/org/apache/hadoop/hbase/master/TestLogsCleaner.java index 19220fb..fc05e47 100644 --- a/src/test/java/org/apache/hadoop/hbase/master/TestLogsCleaner.java +++ b/src/test/java/org/apache/hadoop/hbase/master/TestLogsCleaner.java @@ -32,6 +32,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.Server; +import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.catalog.CatalogTracker; import org.apache.hadoop.hbase.replication.ReplicationZookeeper; import org.apache.hadoop.hbase.replication.regionserver.Replication; @@ -71,7 +72,8 @@ public class TestLogsCleaner { Path oldLogDir = new 
Path(HBaseTestingUtility.getTestDir(), HConstants.HREGION_OLDLOGDIR_NAME); - String fakeMachineName = URLEncoder.encode(server.getServerName(), "UTF8"); + String fakeMachineName = + URLEncoder.encode(server.getServerName().toString(), "UTF8"); FileSystem fs = FileSystem.get(conf); LogCleaner cleaner = new LogCleaner(1000, server, conf, fs, oldLogDir); @@ -146,8 +148,8 @@ public class TestLogsCleaner { } @Override - public String getServerName() { - return "regionserver,60020,000000"; + public ServerName getServerName() { + return new ServerName("regionserver,60020,000000"); } @Override diff --git a/src/test/java/org/apache/hadoop/hbase/master/TestMaster.java b/src/test/java/org/apache/hadoop/hbase/master/TestMaster.java index c4ea83f..f473c80 100644 --- a/src/test/java/org/apache/hadoop/hbase/master/TestMaster.java +++ b/src/test/java/org/apache/hadoop/hbase/master/TestMaster.java @@ -25,7 +25,7 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.MiniHBaseCluster; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.HServerAddress; +import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.catalog.MetaReader; import org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.client.HTable; @@ -75,7 +75,7 @@ public class TestMaster { TEST_UTIL.loadTable(new HTable(TEST_UTIL.getConfiguration(), TABLENAME), FAMILYNAME); - List> tableRegions = + List> tableRegions = MetaReader.getTableRegionsAndLocations(m.getCatalogTracker(), Bytes.toString(TABLENAME)); LOG.info("Regions after load: " + Joiner.on(',').join(tableRegions)); @@ -106,10 +106,10 @@ public class TestMaster { // We have three regions because one is split-in-progress assertEquals(3, tableRegions.size()); LOG.info("Making sure we can call getTableRegionClosest while opening"); - Pair pair = + Pair pair = m.getTableRegionForRow(TABLENAME, Bytes.toBytes("cde")); LOG.info("Result is: " 
+ pair); - Pair tableRegionFromName = + Pair tableRegionFromName = MetaReader.getRegion(m.getCatalogTracker(), pair.getFirst().getRegionName()); assertEquals(tableRegionFromName.getFirst(), pair.getFirst()); diff --git a/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java b/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java index 5a334e0..6b95eee 100644 --- a/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java +++ b/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java @@ -36,9 +36,9 @@ import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.HServerInfo; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.MiniHBaseCluster; +import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.executor.RegionTransitionData; import org.apache.hadoop.hbase.executor.EventHandler.EventType; import org.apache.hadoop.hbase.master.AssignmentManager.RegionState; @@ -85,7 +85,7 @@ public class TestMasterFailover { // verify only one is the active master and we have right number int numActive = 0; int activeIndex = -1; - String activeName = null; + ServerName activeName = null; for (int i = 0; i < masterThreads.size(); i++) { if (masterThreads.get(i).getMaster().isActiveMaster()) { numActive++; @@ -278,8 +278,7 @@ public class TestMasterFailover { // Let's just assign everything to first RS HRegionServer hrs = cluster.getRegionServer(0); - String serverName = hrs.getServerName(); - HServerInfo hsiAlive = hrs.getServerInfo(); + ServerName serverName = hrs.getServerName(); // we'll need some regions to already be assigned out properly on live RS List enabledAndAssignedRegions = new ArrayList(); @@ -292,12 +291,12 @@ public class TestMasterFailover { // now actually assign them for (HRegionInfo hri : 
enabledAndAssignedRegions) { master.assignmentManager.regionPlans.put(hri.getEncodedName(), - new RegionPlan(hri, null, hsiAlive)); + new RegionPlan(hri, null, serverName)); master.assignRegion(hri); } for (HRegionInfo hri : disabledAndAssignedRegions) { master.assignmentManager.regionPlans.put(hri.getEncodedName(), - new RegionPlan(hri, null, hsiAlive)); + new RegionPlan(hri, null, serverName)); master.assignRegion(hri); } @@ -583,12 +582,10 @@ public class TestMasterFailover { // The first RS will stay online HRegionServer hrs = cluster.getRegionServer(0); - HServerInfo hsiAlive = hrs.getServerInfo(); // The second RS is going to be hard-killed HRegionServer hrsDead = cluster.getRegionServer(1); - String deadServerName = hrsDead.getServerName(); - HServerInfo hsiDead = hrsDead.getServerInfo(); + ServerName deadServerName = hrsDead.getServerName(); // we'll need some regions to already be assigned out properly on live RS List enabledAndAssignedRegions = new ArrayList(); @@ -601,12 +598,12 @@ public class TestMasterFailover { // now actually assign them for (HRegionInfo hri : enabledAndAssignedRegions) { master.assignmentManager.regionPlans.put(hri.getEncodedName(), - new RegionPlan(hri, null, hsiAlive)); + new RegionPlan(hri, null, hrs.getServerName())); master.assignRegion(hri); } for (HRegionInfo hri : disabledAndAssignedRegions) { master.assignmentManager.regionPlans.put(hri.getEncodedName(), - new RegionPlan(hri, null, hsiAlive)); + new RegionPlan(hri, null, hrs.getServerName())); master.assignRegion(hri); } @@ -621,12 +618,12 @@ public class TestMasterFailover { // set region plan to server to be killed and trigger assign for (HRegionInfo hri : enabledAndOnDeadRegions) { master.assignmentManager.regionPlans.put(hri.getEncodedName(), - new RegionPlan(hri, null, hsiDead)); + new RegionPlan(hri, null, deadServerName)); master.assignRegion(hri); } for (HRegionInfo hri : disabledAndOnDeadRegions) { master.assignmentManager.regionPlans.put(hri.getEncodedName(), - 
new RegionPlan(hri, null, hsiDead)); + new RegionPlan(hri, null, deadServerName)); master.assignRegion(hri); } diff --git a/src/test/java/org/apache/hadoop/hbase/master/TestRestartCluster.java b/src/test/java/org/apache/hadoop/hbase/master/TestRestartCluster.java index dff6c1b..c0ea649 100644 --- a/src/test/java/org/apache/hadoop/hbase/master/TestRestartCluster.java +++ b/src/test/java/org/apache/hadoop/hbase/master/TestRestartCluster.java @@ -30,6 +30,7 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableExistsException; import org.apache.hadoop.hbase.client.MetaScanner; import org.apache.hadoop.hbase.executor.EventHandler.EventType; @@ -72,11 +73,11 @@ public class TestRestartCluster { String unassignedZNode = zooKeeper.assignmentZNode; ZKUtil.createAndFailSilent(zooKeeper, unassignedZNode); - ZKAssign.createNodeOffline(zooKeeper, HRegionInfo.ROOT_REGIONINFO, - HMaster.MASTER); + ServerName sn = new ServerName(HMaster.MASTER, -1, System.currentTimeMillis()); - ZKAssign.createNodeOffline(zooKeeper, HRegionInfo.FIRST_META_REGIONINFO, - HMaster.MASTER); + ZKAssign.createNodeOffline(zooKeeper, HRegionInfo.ROOT_REGIONINFO, sn); + + ZKAssign.createNodeOffline(zooKeeper, HRegionInfo.FIRST_META_REGIONINFO, sn); LOG.debug("Created UNASSIGNED zNode for ROOT and META regions in state " + EventType.M_ZK_REGION_OFFLINE); @@ -132,4 +133,4 @@ public class TestRestartCluster { UTIL.waitTableAvailable(TABLE, 30000); } } -} +} \ No newline at end of file diff --git a/src/test/java/org/apache/hadoop/hbase/master/TestRollingRestart.java b/src/test/java/org/apache/hadoop/hbase/master/TestRollingRestart.java index 6089ae6..566652d 100644 --- a/src/test/java/org/apache/hadoop/hbase/master/TestRollingRestart.java +++ 
b/src/test/java/org/apache/hadoop/hbase/master/TestRollingRestart.java @@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.MiniHBaseCluster; +import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread; @@ -155,7 +156,7 @@ public class TestRollingRestart { int num = 1; int total = regionServers.size(); for (RegionServerThread rst : regionServers) { - String serverName = rst.getRegionServer().getServerName(); + ServerName serverName = rst.getRegionServer().getServerName(); log("Stopping region server " + num + " of " + total + " [ " + serverName + "]"); rst.getRegionServer().stop("Stopping RS during rolling restart"); @@ -302,7 +303,7 @@ public class TestRollingRestart { } private void waitForRSShutdownToStartAndFinish(MasterThread activeMaster, - String serverName) throws InterruptedException { + ServerName serverName) throws InterruptedException { ServerManager sm = activeMaster.getMaster().getServerManager(); // First wait for it to be in dead list while (!sm.getDeadServers().contains(serverName)) { diff --git a/src/test/java/org/apache/hadoop/hbase/regionserver/TestMasterAddressManager.java b/src/test/java/org/apache/hadoop/hbase/regionserver/TestMasterAddressManager.java index 319a74e..47f52d6 100644 --- a/src/test/java/org/apache/hadoop/hbase/regionserver/TestMasterAddressManager.java +++ b/src/test/java/org/apache/hadoop/hbase/regionserver/TestMasterAddressManager.java @@ -27,8 +27,8 @@ import java.util.concurrent.Semaphore; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.HBaseTestingUtility; -import org.apache.hadoop.hbase.HServerAddress; import org.apache.hadoop.hbase.MasterAddressTracker; +import 
org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.zookeeper.ZKUtil; import org.apache.hadoop.hbase.zookeeper.ZooKeeperListener; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; @@ -75,17 +75,17 @@ public class TestMasterAddressManager { // Create the master node with a dummy address String host = "localhost"; int port = 1234; - HServerAddress dummyAddress = new HServerAddress(host, port); + ServerName sn = new ServerName(host, port, System.currentTimeMillis()); LOG.info("Creating master node"); - ZKUtil.setAddressAndWatch(zk, zk.masterAddressZNode, dummyAddress); + ZKUtil.createEphemeralNodeAndWatch(zk, zk.masterAddressZNode, sn.getBytes()); // Wait for the node to be created LOG.info("Waiting for master address manager to be notified"); listener.waitForCreation(); LOG.info("Master node created"); assertTrue(addressManager.hasMaster()); - HServerAddress pulledAddress = addressManager.getMasterAddress(); - assertTrue(pulledAddress.equals(dummyAddress)); + ServerName pulledAddress = addressManager.getMasterAddress(); + assertTrue(pulledAddress.equals(sn)); } diff --git a/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerZNodeContent.java b/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerZNodeContent.java new file mode 100644 index 0000000..5259e62 --- /dev/null +++ b/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerZNodeContent.java @@ -0,0 +1,39 @@ +/** + * Copyright 2011 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.regionserver; + +import static org.junit.Assert.assertEquals; + +import java.io.IOException; + +import org.apache.hadoop.hbase.util.Bytes; +import org.junit.Test; + +public class TestRegionServerZNodeContent { + @Test + public void testRegionServerZNodeContent() throws IOException { + final int port = 1234; + byte [] bytes = RegionServerZNodeContent.getRegionServerZNodeBytes(port); + System.out.println(Bytes.toString(bytes)); + RegionServerZNodeContent rszc = + RegionServerZNodeContent.getRSZNodeContent(bytes); + assertEquals(rszc.getWebuiport(), port); + } +} diff --git a/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanner.java b/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanner.java index 7ff6a2e..ef8a4b2 100644 --- a/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanner.java +++ b/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanner.java @@ -31,7 +31,6 @@ import org.apache.hadoop.hbase.HBaseTestCase; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.HServerAddress; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.UnknownScannerException; @@ -263,11 +262,11 @@ public class TestScanner extends HBaseTestCase { // Store some new information - HServerAddress address = new HServerAddress("foo.bar.com:1234"); + String address = "foo.bar.com:1234"; put = new Put(ROW_KEY, 
System.currentTimeMillis(), null); put.add(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER, - Bytes.toBytes(address.toString())); + Bytes.toBytes(address)); // put.add(HConstants.COL_STARTCODE, Bytes.toBytes(START_CODE)); @@ -301,12 +300,12 @@ public class TestScanner extends HBaseTestCase { // Now update the information again - address = new HServerAddress("bar.foo.com:4321"); + address = "bar.foo.com:4321"; put = new Put(ROW_KEY, System.currentTimeMillis(), null); put.add(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER, - Bytes.toBytes(address.toString())); + Bytes.toBytes(address)); region.put(put); // Validate again diff --git a/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java b/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java index 56909ce..f128f81 100644 --- a/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java +++ b/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java @@ -248,7 +248,7 @@ public class TestSplitTransactionOnCluster { HRegionServer hrs = getOtherRegionServer(cluster, metaRegionServer); LOG.info("Moving " + hri.getRegionNameAsString() + " to " + hrs.getServerName() + "; metaServerIndex=" + metaServerIndex); - admin.move(hri.getEncodedNameAsBytes(), Bytes.toBytes(hrs.getServerName())); + admin.move(hri.getEncodedNameAsBytes(), hrs.getServerName().getBytes()); } // Wait till table region is up on the server that is NOT carrying .META.. 
while (true) { diff --git a/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java b/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java index 20a1ff8..c147a14 100644 --- a/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java +++ b/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java @@ -19,6 +19,12 @@ */ package org.apache.hadoop.hbase.replication.regionserver; +import static org.junit.Assert.assertEquals; + +import java.net.URLEncoder; +import java.util.ArrayList; +import java.util.List; + import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; @@ -32,6 +38,7 @@ import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.Server; +import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.catalog.CatalogTracker; import org.apache.hadoop.hbase.regionserver.wal.HLog; import org.apache.hadoop.hbase.regionserver.wal.HLogKey; @@ -45,16 +52,8 @@ import org.junit.After; import org.junit.AfterClass; import org.junit.Before; import org.junit.BeforeClass; -import org.junit.Ignore; import org.junit.Test; -import java.net.URLEncoder; -import java.util.ArrayList; -import java.util.List; -import java.util.concurrent.atomic.AtomicBoolean; - -import static org.junit.Assert.assertEquals; - public class TestReplicationSourceManager { private static final Log LOG = @@ -225,7 +224,7 @@ public class TestReplicationSourceManager { } @Override - public String getServerName() { + public ServerName getServerName() { return null; //To change body of implemented methods use File | Settings | File Templates. 
} diff --git a/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java b/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java index a055082..ac90a92 100644 --- a/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java +++ b/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java @@ -26,8 +26,8 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.HServerInfo; import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Result; @@ -82,15 +82,15 @@ public class TestHBaseFsck { for (JVMClusterUtil.RegionServerThread rs : TEST_UTIL.getHBaseCluster().getRegionServerThreads()) { - HServerInfo hsi = rs.getRegionServer().getServerInfo(); + ServerName sn = rs.getRegionServer().getServerName(); // When we find a diff RS, change the assignment and break - if (startCode != hsi.getStartCode()) { + if (startCode != sn.getStartcode()) { Put put = new Put(res.getRow()); put.add(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER, - Bytes.toBytes(hsi.getHostnamePort())); + Bytes.toBytes(sn.getHostAndPort())); put.add(HConstants.CATALOG_FAMILY, HConstants.STARTCODE_QUALIFIER, - Bytes.toBytes(hsi.getStartCode())); + Bytes.toBytes(sn.getStartcode())); meta.put(put); break resforloop; }