(count);
@@ -260,4 +258,4 @@ public class ClusterStatus extends VersionedWritable {
}
this.clusterId = in.readUTF();
}
-}
+}
\ No newline at end of file
diff --git a/src/main/java/org/apache/hadoop/hbase/HConstants.java b/src/main/java/org/apache/hadoop/hbase/HConstants.java
index ce0ea12..95370de 100644
--- a/src/main/java/org/apache/hadoop/hbase/HConstants.java
+++ b/src/main/java/org/apache/hadoop/hbase/HConstants.java
@@ -367,6 +367,12 @@ public final class HConstants {
/** HBCK special code name used as server name when manipulating ZK nodes */
public static final String HBCK_CODE_NAME = "HBCKServerName";
+ public static final ServerName HBCK_CODE_SERVERNAME =
+ new ServerName(HBCK_CODE_NAME, -1, -1L);
+
+ public static final String KEY_FOR_HOSTNAME_SEEN_BY_MASTER =
+ "hbase.regionserver.hostname.seen.by.master";
+
public static final String HBASE_MASTER_LOGCLEANER_PLUGINS =
"hbase.master.logcleaner.plugins";
diff --git a/src/main/java/org/apache/hadoop/hbase/HMsg.java b/src/main/java/org/apache/hadoop/hbase/HMsg.java
deleted file mode 100644
index 87beb00..0000000
--- a/src/main/java/org/apache/hadoop/hbase/HMsg.java
+++ /dev/null
@@ -1,235 +0,0 @@
-/**
- * Copyright 2010 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase;
-
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.io.Writable;
-
-/**
- * HMsg is used to send messages between master and regionservers. Messages are
- * sent as payload on the regionserver-to-master heartbeats. Region assignment
- * does not use this mechanism. It goes via zookeeper.
- *
- * Most of the time the messages are simple but some messages are accompanied
- * by the region affected. HMsg may also carry an optional message.
- *
- *
- * TODO: Clean out all messages that go from master to regionserver; by
- * design, these are to go via zk from here on out.
- */
-public class HMsg implements Writable {
- public static final HMsg [] EMPTY_HMSG_ARRAY = new HMsg[0];
-
- public static enum Type {
- /**
- * When RegionServer receives this message, it goes into a sleep that only
- * an exit will cure. This message is sent by unit tests simulating
- * pathological states.
- */
- TESTING_BLOCK_REGIONSERVER,
- }
-
- private Type type = null;
- private HRegionInfo info = null;
- private byte[] message = null;
- private HRegionInfo daughterA = null;
- private HRegionInfo daughterB = null;
-
- /** Default constructor. Used during deserialization */
- public HMsg() {
- this(null);
- }
-
- /**
- * Construct a message with the specified message and empty HRegionInfo
- * @param type Message type
- */
- public HMsg(final HMsg.Type type) {
- this(type, new HRegionInfo(), null);
- }
-
- /**
- * Construct a message with the specified message and HRegionInfo
- * @param type Message type
- * @param hri Region to which message type applies
- */
- public HMsg(final HMsg.Type type, final HRegionInfo hri) {
- this(type, hri, null);
- }
-
- /**
- * Construct a message with the specified message and HRegionInfo
- *
- * @param type Message type
- * @param hri Region to which message type applies. Cannot be
- * null. If no info associated, used other Constructor.
- * @param msg Optional message (Stringified exception, etc.)
- */
- public HMsg(final HMsg.Type type, final HRegionInfo hri, final byte[] msg) {
- this(type, hri, null, null, msg);
- }
-
- /**
- * Construct a message with the specified message and HRegionInfo
- *
- * @param type Message type
- * @param hri Region to which message type applies. Cannot be
- * null. If no info associated, used other Constructor.
- * @param daughterA
- * @param daughterB
- * @param msg Optional message (Stringified exception, etc.)
- */
- public HMsg(final HMsg.Type type, final HRegionInfo hri,
- final HRegionInfo daughterA, final HRegionInfo daughterB, final byte[] msg) {
- this.type = type;
- if (hri == null) {
- throw new NullPointerException("Region cannot be null");
- }
- this.info = hri;
- this.message = msg;
- this.daughterA = daughterA;
- this.daughterB = daughterB;
- }
-
- /**
- * @return Region info or null if none associated with this message type.
- */
- public HRegionInfo getRegionInfo() {
- return this.info;
- }
-
- /** @return the type of message */
- public Type getType() {
- return this.type;
- }
-
- /**
- * @param other Message type to compare to
- * @return True if we are of same message type as other
- */
- public boolean isType(final HMsg.Type other) {
- return this.type.equals(other);
- }
-
- /** @return the message type */
- public byte[] getMessage() {
- return this.message;
- }
-
- /**
- * @return First daughter if Type is MSG_REPORT_SPLIT_INCLUDES_DAUGHTERS else
- * null
- */
- public HRegionInfo getDaughterA() {
- return this.daughterA;
- }
-
- /**
- * @return Second daughter if Type is MSG_REPORT_SPLIT_INCLUDES_DAUGHTERS else
- * null
- */
- public HRegionInfo getDaughterB() {
- return this.daughterB;
- }
-
- /**
- * @see java.lang.Object#toString()
- */
- @Override
- public String toString() {
- StringBuilder sb = new StringBuilder();
- sb.append(this.type.toString());
- // If null or empty region, don't bother printing it out.
- if (this.info != null && this.info.getRegionName().length > 0) {
- sb.append(": ");
- sb.append(this.info.getRegionNameAsString());
- }
- if (this.message != null && this.message.length > 0) {
- sb.append(": " + Bytes.toString(this.message));
- }
- return sb.toString();
- }
-
- /**
- * @see java.lang.Object#equals(java.lang.Object)
- */
- @Override
- public boolean equals(Object obj) {
- if (this == obj) {
- return true;
- }
- if (obj == null) {
- return false;
- }
- if (getClass() != obj.getClass()) {
- return false;
- }
- HMsg that = (HMsg)obj;
- return this.type.equals(that.type) &&
- (this.info != null)? this.info.equals(that.info):
- that.info == null;
- }
-
- /**
- * @see java.lang.Object#hashCode()
- */
- @Override
- public int hashCode() {
- int result = this.type.hashCode();
- if (this.info != null) {
- result ^= this.info.hashCode();
- }
- return result;
- }
-
- // ////////////////////////////////////////////////////////////////////////////
- // Writable
- //////////////////////////////////////////////////////////////////////////////
-
- /**
- * @see org.apache.hadoop.io.Writable#write(java.io.DataOutput)
- */
- public void write(DataOutput out) throws IOException {
- out.writeInt(this.type.ordinal());
- this.info.write(out);
- if (this.message == null || this.message.length == 0) {
- out.writeBoolean(false);
- } else {
- out.writeBoolean(true);
- Bytes.writeByteArray(out, this.message);
- }
- }
-
- /**
- * @see org.apache.hadoop.io.Writable#readFields(java.io.DataInput)
- */
- public void readFields(DataInput in) throws IOException {
- int ordinal = in.readInt();
- this.type = HMsg.Type.values()[ordinal];
- this.info.readFields(in);
- boolean hasMessage = in.readBoolean();
- if (hasMessage) {
- this.message = Bytes.readByteArray(in);
- }
- }
-}
\ No newline at end of file
diff --git a/src/main/java/org/apache/hadoop/hbase/HRegionLocation.java b/src/main/java/org/apache/hadoop/hbase/HRegionLocation.java
index bd353b8..618cd2e 100644
--- a/src/main/java/org/apache/hadoop/hbase/HRegionLocation.java
+++ b/src/main/java/org/apache/hadoop/hbase/HRegionLocation.java
@@ -20,19 +20,20 @@
package org.apache.hadoop.hbase;
/**
- * Contains the HRegionInfo for the region and the HServerAddress for the
- * HRegionServer serving the region
+ * Data structure to hold HRegionInfo and the HServerAddress for the hosting
+ * HRegionServer. Immutable.
*/
public class HRegionLocation implements Comparable {
- // TODO: Is this class necessary? Why not just have a Pair?
- private HRegionInfo regionInfo;
- private HServerAddress serverAddress;
+ // TODO: Do we need a class to pass HServerAddress? Won't InetSocketAddress
+ // do? Need to clean up client references to HServerAddress first.
+ // St.Ack 02/15/2011.
+ private final HRegionInfo regionInfo;
+ private final HServerAddress serverAddress;
/**
* Constructor
- *
* @param regionInfo the HRegionInfo for the region
- * @param serverAddress the HServerAddress for the region server
+ * @param serverAddress the HServerAddress for the hosting region server
*/
public HRegionLocation(HRegionInfo regionInfo, HServerAddress serverAddress) {
this.regionInfo = regionInfo;
@@ -44,8 +45,8 @@ public class HRegionLocation implements Comparable {
*/
@Override
public String toString() {
- return "address: " + this.serverAddress.toString() + ", regioninfo: " +
- this.regionInfo.getRegionNameAsString();
+ return "region=" + this.regionInfo.getRegionNameAsString() +
+ ", address=" + this.serverAddress.toString();
}
/**
@@ -91,7 +92,7 @@ public class HRegionLocation implements Comparable {
public int compareTo(HRegionLocation o) {
int result = this.regionInfo.compareTo(o.regionInfo);
- if(result == 0) {
+ if (result == 0) {
result = this.serverAddress.compareTo(o.serverAddress);
}
return result;
diff --git a/src/main/java/org/apache/hadoop/hbase/HServerAddress.java b/src/main/java/org/apache/hadoop/hbase/HServerAddress.java
index 7f8a472..20255e7 100644
--- a/src/main/java/org/apache/hadoop/hbase/HServerAddress.java
+++ b/src/main/java/org/apache/hadoop/hbase/HServerAddress.java
@@ -19,25 +19,34 @@
*/
package org.apache.hadoop.hbase;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.io.WritableComparable;
-
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
-import java.net.InetSocketAddress;
import java.net.InetAddress;
+import java.net.InetSocketAddress;
+
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.io.WritableComparable;
/**
- * HServerAddress is a "label" for a HBase server made of host and port number.
+ * HServerAddress hosts an {@link InetSocketAddress} and makes it
+ * {@link WritableComparable}. Resolves on construction AND on
+ * deserialization, so it can end up with different results if the
+ * two ends of the serialization have different resolvers.
+ * Be careful where you use it. It should only be used when you need to pass
+ * an InetSocketAddress, and even then it is a bad idea because of the above.
+ * Because of the resolve on either end of a serialization, this class is on
+ * its way out.
+ * @deprecated Use {@link InetSocketAddress} or {@link ServerName}
*/
public class HServerAddress implements WritableComparable {
- private InetSocketAddress address;
- String stringValue;
+ private InetSocketAddress address = null;
+ private String cachedToString = "";
+ /**
+ * Constructor for deserialization use only.
+ */
public HServerAddress() {
- this.address = null;
- this.stringValue = null;
+ super();
}
/**
@@ -46,34 +55,20 @@ public class HServerAddress implements WritableComparable {
*/
public HServerAddress(InetSocketAddress address) {
this.address = address;
- this.stringValue = address.getAddress().getHostName() + ":" +
- address.getPort();
checkBindAddressCanBeResolved();
+ this.cachedToString = createCachedToString();
}
- /**
- * @param hostAndPort Hostname and port formatted as <hostname> ':' <port>
- */
- public HServerAddress(String hostAndPort) {
- int colonIndex = hostAndPort.lastIndexOf(':');
- if (colonIndex < 0) {
- throw new IllegalArgumentException("Not a host:port pair: " + hostAndPort);
- }
- String host = hostAndPort.substring(0, colonIndex);
- int port = Integer.parseInt(hostAndPort.substring(colonIndex + 1));
- this.address = new InetSocketAddress(host, port);
- this.stringValue = address.getHostName() + ":" + port;
- checkBindAddressCanBeResolved();
+ private String createCachedToString() {
+ return this.address.toString();
}
/**
- * @param bindAddress Hostname
+ * @param hostname Hostname
* @param port Port number
*/
- public HServerAddress(String bindAddress, int port) {
- this.address = new InetSocketAddress(bindAddress, port);
- this.stringValue = address.getHostName() + ":" + port;
- checkBindAddressCanBeResolved();
+ public HServerAddress(final String hostname, final int port) {
+ this(new InetSocketAddress(hostname, port));
}
/**
@@ -81,45 +76,48 @@ public class HServerAddress implements WritableComparable {
* @param other HServerAddress to copy from
*/
public HServerAddress(HServerAddress other) {
- String bindAddress = other.getBindAddress();
- int port = other.getPort();
- this.address = new InetSocketAddress(bindAddress, port);
- stringValue = other.stringValue;
- checkBindAddressCanBeResolved();
+ this(new InetSocketAddress(other.getHostname(), other.getPort()));
}
- /** @return Bind address */
+ /** @return Bind address -- the raw IP, the result of a call to
+ * {@link InetSocketAddress#getAddress()#getHostAddress()} --
+ * or null if cannot resolve */
public String getBindAddress() {
- final InetAddress addr = address.getAddress();
- if (addr != null) {
- return addr.getHostAddress();
- } else {
- LogFactory.getLog(HServerAddress.class).error("Could not resolve the"
- + " DNS name of " + stringValue);
- return null;
- }
+ // This returns null if the address is not resolved.
+ final InetAddress addr = this.address.getAddress();
+ if (addr != null) return addr.getHostAddress();
+ LogFactory.getLog(HServerAddress.class).error("Could not resolve the"
+ + " DNS name of " + this.address.toString());
+ return null;
}
private void checkBindAddressCanBeResolved() {
if (getBindAddress() == null) {
throw new IllegalArgumentException("Could not resolve the"
- + " DNS name of " + stringValue);
+ + " DNS name of " + this.address.toString());
}
}
/** @return Port number */
public int getPort() {
- return address.getPort();
+ return this.address.getPort();
}
/** @return Hostname */
public String getHostname() {
- return address.getHostName();
+ return this.address.getHostName();
+ }
+
+ /**
+ * @return Returns <hostname> ':' <port>
+ */
+ public String getHostnameAndPort() {
+ return getHostname() + ":" + getPort();
}
/** @return The InetSocketAddress */
public InetSocketAddress getInetSocketAddress() {
- return address;
+ return this.address;
}
/**
@@ -127,27 +125,21 @@ public class HServerAddress implements WritableComparable {
*/
@Override
public String toString() {
- return stringValue == null ? "" : stringValue;
+ return this.cachedToString;
}
@Override
public boolean equals(Object o) {
- if (this == o) {
- return true;
- }
- if (o == null) {
- return false;
- }
- if (getClass() != o.getClass()) {
- return false;
- }
- return compareTo((HServerAddress) o) == 0;
+ if (this == o) return true;
+ if (o == null) return false;
+ if (getClass() != o.getClass()) return false;
+ return compareTo((HServerAddress)o) == 0;
}
@Override
public int hashCode() {
- int result = address.hashCode();
- result ^= stringValue.hashCode();
+ int result = this.address.hashCode();
+ result ^= this.cachedToString.hashCode();
return result;
}
@@ -158,24 +150,20 @@ public class HServerAddress implements WritableComparable {
public void readFields(DataInput in) throws IOException {
String hostname = in.readUTF();
int port = in.readInt();
-
- if (hostname == null || hostname.length() == 0) {
- address = null;
- stringValue = null;
- } else {
- address = new InetSocketAddress(hostname, port);
- stringValue = hostname + ":" + port;
+ if (hostname != null && hostname.length() > 0) {
+ this.address = new InetSocketAddress(hostname, port);
checkBindAddressCanBeResolved();
+ createCachedToString();
}
}
public void write(DataOutput out) throws IOException {
- if (address == null) {
+ if (this.address == null) {
out.writeUTF("");
out.writeInt(0);
} else {
- out.writeUTF(address.getAddress().getHostName());
- out.writeInt(address.getPort());
+ out.writeUTF(this.address.getAddress().getHostName());
+ out.writeInt(this.address.getPort());
}
}
@@ -187,7 +175,7 @@ public class HServerAddress implements WritableComparable {
// Addresses as Strings may not compare though address is for the one
// server with only difference being that one address has hostname
// resolved whereas other only has IP.
- if (address.equals(o.address)) return 0;
+ if (this.address.equals(o.address)) return 0;
return toString().compareTo(o.toString());
}
-}
+}
\ No newline at end of file
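
As the rewritten javadoc warns, HServerAddress resolves its InetSocketAddress eagerly: the host is resolved in the constructor (and again on readFields), and an unresolvable host throws IllegalArgumentException. A small sketch of the accessor behavior, assuming the constructors shown above and a host the local resolver can answer for:

    import org.apache.hadoop.hbase.HServerAddress;

    public class AddressExample {
      public static void main(String[] args) {
        HServerAddress addr = new HServerAddress("localhost", 60020);  // resolves here
        System.out.println(addr.getHostname());         // localhost
        System.out.println(addr.getBindAddress());      // the resolved IP, e.g. 127.0.0.1
        System.out.println(addr.getHostnameAndPort());  // localhost:60020
      }
    }
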
diff --git a/src/main/java/org/apache/hadoop/hbase/HServerInfo.java b/src/main/java/org/apache/hadoop/hbase/HServerInfo.java
index 7aa8bd9..49c06b1 100644
--- a/src/main/java/org/apache/hadoop/hbase/HServerInfo.java
+++ b/src/main/java/org/apache/hadoop/hbase/HServerInfo.java
@@ -23,77 +23,43 @@ import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.net.InetSocketAddress;
-import java.util.Comparator;
-import java.util.Set;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.io.VersionedWritable;
-import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
/**
- * HServerInfo is meta info about an {@link HRegionServer}. It is the token
- * by which a master distingushes a particular regionserver from the rest.
- * It holds hostname, ports, regionserver startcode, and load. Each server has
- * a servername where servername is made up of a concatenation of
- * hostname, port, and regionserver startcode. This servername is used in
- * various places identifying this regionserver. Its even used as part of
- * a pathname in the filesystem. As part of the initialization,
- * master will pass the regionserver the address that it knows this regionserver
- * by. In subsequent communications, the regionserver will pass a HServerInfo
- * with the master-supplied address.
+ * HServerInfo is meta info about an {@link HRegionServer}. It hosts the
+ * {@link HServerAddress}, its webui port, and its server startcode.
+ * @deprecated Use {@link InetSocketAddress} and/or {@link ServerName}
*/
public class HServerInfo extends VersionedWritable
- implements WritableComparable {
+implements WritableComparable<HServerInfo> {
private static final byte VERSION = 0;
-
- /*
- * This character is used as separator between server hostname and port and
- * its startcode. Servername is formatted as
- * <hostname> '{@ink #SERVERNAME_SEPARATOR"}' <port> '{@ink #SERVERNAME_SEPARATOR"}' <startcode>.
- */
- private static final String SERVERNAME_SEPARATOR = ",";
-
private HServerAddress serverAddress;
private long startCode;
- private HServerLoad load;
- private int infoPort;
- // Servername is made of hostname, port and startcode.
- private String serverName = null;
- // Hostname of the regionserver.
- private String hostname;
- private String cachedHostnamePort = null;
+ private int webuiport;
- /** @return the object version number */
- public byte getVersion() {
- return VERSION;
- }
public HServerInfo() {
- this(new HServerAddress(), 0, HConstants.DEFAULT_REGIONSERVER_INFOPORT,
- "default name");
+ super();
}
/**
- * Constructor that creates a HServerInfo with a generated startcode and an
- * empty load.
- * @param serverAddress An {@link InetSocketAddress} encased in a {@link Writable}
- * @param infoPort Port the webui runs on.
- * @param hostname Server hostname.
+ * Constructor that creates a HServerInfo with a generated startcode
+ * @param serverAddress
+ * @param webuiport Port the webui runs on.
*/
- public HServerInfo(HServerAddress serverAddress, final int infoPort,
- final String hostname) {
- this(serverAddress, System.currentTimeMillis(), infoPort, hostname);
+ public HServerInfo(final HServerAddress serverAddress, final int webuiport) {
+ this(serverAddress, System.currentTimeMillis(), webuiport);
}
-
+
public HServerInfo(HServerAddress serverAddress, long startCode,
- final int infoPort, String hostname) {
+ final int webuiport) {
this.serverAddress = serverAddress;
this.startCode = startCode;
- this.load = new HServerLoad();
- this.infoPort = infoPort;
- this.hostname = hostname;
+ this.webuiport = webuiport;
}
/**
@@ -103,106 +69,32 @@ public class HServerInfo extends VersionedWritable
public HServerInfo(HServerInfo other) {
this.serverAddress = new HServerAddress(other.getServerAddress());
this.startCode = other.getStartCode();
- this.load = other.getLoad();
- this.infoPort = other.getInfoPort();
- this.hostname = other.hostname;
+ this.webuiport = other.getInfoPort();
}
- public HServerLoad getLoad() {
- return load;
- }
-
- public void setLoad(HServerLoad load) {
- this.load = load;
+ /** @return the object version number */
+ public byte getVersion() {
+ return VERSION;
}
public synchronized HServerAddress getServerAddress() {
return new HServerAddress(serverAddress);
}
- public synchronized void setServerAddress(HServerAddress serverAddress) {
- this.serverAddress = serverAddress;
- this.hostname = serverAddress.getHostname();
- this.serverName = null;
- }
-
public synchronized long getStartCode() {
return startCode;
}
public int getInfoPort() {
- return this.infoPort;
- }
-
- public String getHostname() {
- return this.hostname;
- }
-
- /**
- * @return The hostname and port concatenated with a ':' as separator.
- */
- public synchronized String getHostnamePort() {
- if (this.cachedHostnamePort == null) {
- this.cachedHostnamePort = getHostnamePort(this.hostname, this.serverAddress.getPort());
- }
- return this.cachedHostnamePort;
- }
-
- /**
- * @param hostname
- * @param port
- * @return The hostname and port concatenated with a ':' as separator.
- */
- public static String getHostnamePort(final String hostname, final int port) {
- return hostname + ":" + port;
- }
-
- /**
- * Gets the unique server instance name. Includes the hostname, port, and
- * start code.
- * @return Server name made of the concatenation of hostname, port and
- * startcode formatted as <hostname> ',' <port> ',' <startcode>
- */
- public synchronized String getServerName() {
- if (this.serverName == null) {
- this.serverName = getServerName(this.hostname,
- this.serverAddress.getPort(), this.startCode);
- }
- return this.serverName;
+ return getWebuiPort();
}
- public static synchronized String getServerName(final String hostAndPort,
- final long startcode) {
- int index = hostAndPort.indexOf(":");
- if (index <= 0) throw new IllegalArgumentException("Expected ':' ");
- return getServerName(hostAndPort.substring(0, index),
- Integer.parseInt(hostAndPort.substring(index + 1)), startcode);
+ public int getWebuiPort() {
+ return this.webuiport;
}
- /**
- * @param address Server address
- * @param startCode Server startcode
- * @return Server name made of the concatenation of hostname, port and
- * startcode formatted as <hostname> ',' <port> ',' <startcode>
- */
- public static String getServerName(HServerAddress address, long startCode) {
- return getServerName(address.getHostname(), address.getPort(), startCode);
- }
-
- /*
- * @param hostName
- * @param port
- * @param startCode
- * @return Server name made of the concatenation of hostname, port and
- * startcode formatted as <hostname> ',' <port> ',' <startcode>
- */
- public static String getServerName(String hostName, int port, long startCode) {
- StringBuilder name = new StringBuilder(hostName);
- name.append(SERVERNAME_SEPARATOR);
- name.append(port);
- name.append(SERVERNAME_SEPARATOR);
- name.append(startCode);
- return name.toString();
+ public String getHostname() {
+ return this.serverAddress.getHostname();
}
/**
@@ -211,81 +103,44 @@ public class HServerInfo extends VersionedWritable
* @see #getLoad()
*/
@Override
- public String toString() {
- return "serverName=" + getServerName() +
- ", load=(" + this.load.toString() + ")";
+ public synchronized String toString() {
+ return ServerName.getServerName(this.serverAddress.getHostnameAndPort(),
+ this.startCode);
}
@Override
public boolean equals(Object obj) {
- if (this == obj) {
- return true;
- }
- if (obj == null) {
- return false;
- }
- if (getClass() != obj.getClass()) {
- return false;
- }
+ if (this == obj) return true;
+ if (obj == null) return false;
+ if (getClass() != obj.getClass()) return false;
return compareTo((HServerInfo)obj) == 0;
}
@Override
public int hashCode() {
- return this.getServerName().hashCode();
+ int code = this.serverAddress.hashCode();
+ code ^= this.webuiport;
+ code ^= this.startCode;
+ return code;
}
public void readFields(DataInput in) throws IOException {
this.serverAddress.readFields(in);
this.startCode = in.readLong();
- this.load.readFields(in);
- this.infoPort = in.readInt();
- this.hostname = in.readUTF();
+ this.webuiport = in.readInt();
}
public void write(DataOutput out) throws IOException {
this.serverAddress.write(out);
out.writeLong(this.startCode);
- this.load.write(out);
- out.writeInt(this.infoPort);
- out.writeUTF(hostname);
+ out.writeInt(this.webuiport);
}
public int compareTo(HServerInfo o) {
- return this.getServerName().compareTo(o.getServerName());
- }
-
- /**
- * Orders HServerInfos by load then name. Natural/ascending order.
- */
- public static class LoadComparator implements Comparator {
- @Override
- public int compare(HServerInfo left, HServerInfo right) {
- int loadCompare = left.getLoad().compareTo(right.getLoad());
- return loadCompare != 0 ? loadCompare : left.compareTo(right);
- }
- }
-
- /**
- * Utility method that does a find of a servername or a hostandport combination
- * in the passed Set.
- * @param servers Set of server names
- * @param serverName Name to look for
- * @param hostAndPortOnly If serverName is a
- * hostname ':' port
- * or hostname , port , startcode.
- * @return True if serverName found in servers
- */
- public static boolean isServer(final Set servers,
- final String serverName, final boolean hostAndPortOnly) {
- if (!hostAndPortOnly) return servers.contains(serverName);
- String serverNameColonReplaced =
- serverName.replaceFirst(":", SERVERNAME_SEPARATOR);
- for (String hostPortStartCode: servers) {
- int index = hostPortStartCode.lastIndexOf(SERVERNAME_SEPARATOR);
- String hostPortStrippedOfStartCode = hostPortStartCode.substring(0, index);
- if (hostPortStrippedOfStartCode.equals(serverNameColonReplaced)) return true;
- }
- return false;
+ int compare = this.serverAddress.compareTo(o.getServerAddress());
+ if (compare != 0) return compare;
+ if (this.webuiport != o.getInfoPort()) return this.webuiport - o.getInfoPort();
+ if (this.startCode != o.getStartCode()) return (int)(this.startCode - o.getStartCode());
+ return 0;
}
-}
+}
\ No newline at end of file
diff --git a/src/main/java/org/apache/hadoop/hbase/HServerLoad.java b/src/main/java/org/apache/hadoop/hbase/HServerLoad.java
index 2372053..5e55893 100644
--- a/src/main/java/org/apache/hadoop/hbase/HServerLoad.java
+++ b/src/main/java/org/apache/hadoop/hbase/HServerLoad.java
@@ -29,29 +29,31 @@ import java.util.TreeMap;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Strings;
import org.apache.hadoop.io.VersionedWritable;
-import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
/**
- * This class encapsulates metrics for determining the load on a HRegionServer
+ * This class is used exporting current state of server load on a RegionServer.
*/
public class HServerLoad extends VersionedWritable
- implements WritableComparable {
+implements WritableComparable<HServerLoad> {
private static final byte VERSION = 0;
+ // Empty load instance.
+ public static final HServerLoad EMPTY_HSERVERLOAD = new HServerLoad();
+
+ /** Number of requests since last report
+ */
+ // TODO: Instead build this up out of region counters.
+ private int numberOfRequests = 0;
- /** number of regions */
- // could just use regionLoad.size() but master.RegionManager likes to play
- // around with this value while passing HServerLoad objects around during
- // balancer calculations
- private int numberOfRegions;
- /** number of requests since last report */
- private int numberOfRequests;
/** the amount of used heap, in MB */
- private int usedHeapMB;
+ private int usedHeapMB = 0;
+
/** the maximum allowable size of the heap, in MB */
- private int maxHeapMB;
+ private int maxHeapMB = 0;
+
/** per-region load metrics */
- private Map regionLoad = new TreeMap(Bytes.BYTES_COMPARATOR);
+  private Map<byte [], RegionLoad> regionLoad =
+    new TreeMap<byte [], RegionLoad>(Bytes.BYTES_COMPARATOR);
/** @return the object version number */
public byte getVersion() {
@@ -61,7 +63,14 @@ public class HServerLoad extends VersionedWritable
/**
* Encapsulates per-region loading metrics.
*/
- public static class RegionLoad implements Writable {
+ public static class RegionLoad extends VersionedWritable {
+ private static final byte VERSION = 0;
+
+ /** @return the object version number */
+ public byte getVersion() {
+ return VERSION;
+ }
+
/** the region name */
private byte[] name;
/** the number of stores for the region */
@@ -236,6 +245,8 @@ public class HServerLoad extends VersionedWritable
// Writable
public void readFields(DataInput in) throws IOException {
+ int version = in.readByte();
+ if (version != VERSION) throw new IOException("Version mismatch; " + version);
int namelen = in.readInt();
this.name = new byte[namelen];
in.readFully(this.name);
@@ -249,6 +260,7 @@ public class HServerLoad extends VersionedWritable
}
public void write(DataOutput out) throws IOException {
+ out.writeByte(VERSION);
out.writeInt(name.length);
out.write(name);
out.writeInt(stores);
@@ -308,10 +320,11 @@ public class HServerLoad extends VersionedWritable
* @param maxHeapMB
*/
public HServerLoad(final int numberOfRequests, final int usedHeapMB,
- final int maxHeapMB) {
+    final int maxHeapMB, final Map<byte [], RegionLoad> regionLoad) {
this.numberOfRequests = numberOfRequests;
this.usedHeapMB = usedHeapMB;
this.maxHeapMB = maxHeapMB;
+ this.regionLoad = regionLoad;
}
/**
@@ -319,7 +332,7 @@ public class HServerLoad extends VersionedWritable
* @param hsl the template HServerLoad
*/
public HServerLoad(final HServerLoad hsl) {
- this(hsl.numberOfRequests, hsl.usedHeapMB, hsl.maxHeapMB);
+ this(hsl.numberOfRequests, hsl.usedHeapMB, hsl.maxHeapMB, hsl.getRegionsLoad());
for (Map.Entry e : hsl.regionLoad.entrySet()) {
this.regionLoad.put(e.getKey(), e.getValue());
}
@@ -338,7 +351,7 @@ public class HServerLoad extends VersionedWritable
// int load = numberOfRequests == 0 ? 1 : numberOfRequests;
// load *= numberOfRegions == 0 ? 1 : numberOfRegions;
// return load;
- return numberOfRegions;
+ return this.regionLoad.size();
}
/**
@@ -356,6 +369,7 @@ public class HServerLoad extends VersionedWritable
* @return The load as a String
*/
public String toString(int msgInterval) {
+ int numberOfRegions = this.regionLoad.size();
StringBuilder sb = new StringBuilder();
sb = Strings.appendKeyValue(sb, "requests",
Integer.valueOf(numberOfRequests/msgInterval));
@@ -384,23 +398,13 @@ public class HServerLoad extends VersionedWritable
return compareTo((HServerLoad)o) == 0;
}
- /**
- * @see java.lang.Object#hashCode()
- */
- @Override
- public int hashCode() {
- int result = Integer.valueOf(numberOfRequests).hashCode();
- result ^= Integer.valueOf(numberOfRegions).hashCode();
- return result;
- }
-
// Getters
/**
* @return the numberOfRegions
*/
public int getNumberOfRegions() {
- return numberOfRegions;
+ return this.regionLoad.size();
}
/**
@@ -471,69 +475,15 @@ public class HServerLoad extends VersionedWritable
return count;
}
- // Setters
-
- /**
- * @param numberOfRegions the number of regions
- */
- public void setNumberOfRegions(int numberOfRegions) {
- this.numberOfRegions = numberOfRegions;
- }
-
- /**
- * @param numberOfRequests the number of requests to set
- */
- public void setNumberOfRequests(int numberOfRequests) {
- this.numberOfRequests = numberOfRequests;
- }
-
- /**
- * @param usedHeapMB the amount of heap in use, in MB
- */
- public void setUsedHeapMB(int usedHeapMB) {
- this.usedHeapMB = usedHeapMB;
- }
-
- /**
- * @param maxHeapMB the maximum allowable heap size, in MB
- */
- public void setMaxHeapMB(int maxHeapMB) {
- this.maxHeapMB = maxHeapMB;
- }
-
- /**
- * @param load Instance of HServerLoad
- */
- public void addRegionInfo(final HServerLoad.RegionLoad load) {
- this.numberOfRegions++;
- this.regionLoad.put(load.getName(), load);
- }
-
- /**
- * @param name
- * @param stores
- * @param storefiles
- * @param memstoreSizeMB
- * @param storefileIndexSizeMB
- * @param requestsCount
- * @deprecated Use {@link #addRegionInfo(RegionLoad)}
- */
- @Deprecated
- public void addRegionInfo(final byte[] name, final int stores,
- final int storefiles, final int storefileSizeMB,
- final int memstoreSizeMB, final int storefileIndexSizeMB,
- final int readRequestsCount, final int writeRequestsCount) {
- this.regionLoad.put(name, new HServerLoad.RegionLoad(name, stores, storefiles,
- storefileSizeMB, memstoreSizeMB, storefileIndexSizeMB, readRequestsCount, writeRequestsCount));
- }
-
// Writable
public void readFields(DataInput in) throws IOException {
+ int version = in.readByte();
+ if (version != VERSION) throw new IOException("Version mismatch; " + version);
numberOfRequests = in.readInt();
usedHeapMB = in.readInt();
maxHeapMB = in.readInt();
- numberOfRegions = in.readInt();
+ int numberOfRegions = in.readInt();
for (int i = 0; i < numberOfRegions; i++) {
RegionLoad rl = new RegionLoad();
rl.readFields(in);
@@ -542,10 +492,11 @@ public class HServerLoad extends VersionedWritable
}
public void write(DataOutput out) throws IOException {
+ out.writeByte((byte)VERSION);
out.writeInt(numberOfRequests);
out.writeInt(usedHeapMB);
out.writeInt(maxHeapMB);
- out.writeInt(numberOfRegions);
+ out.writeInt(this.regionLoad.size());
for (RegionLoad rl: regionLoad.values())
rl.write(out);
}
@@ -555,4 +506,4 @@ public class HServerLoad extends VersionedWritable
public int compareTo(HServerLoad o) {
return this.getLoad() - o.getLoad();
}
-}
+}
\ No newline at end of file
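
HServerLoad now extends VersionedWritable and leads its serialized form with a version byte, so readFields rejects payloads written by a different version. A round-trip sketch, assuming the four-argument constructor above takes the region-load map keyed by region name bytes:

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.util.TreeMap;
    import org.apache.hadoop.hbase.HServerLoad;
    import org.apache.hadoop.hbase.util.Bytes;

    public class LoadRoundTrip {
      public static void main(String[] args) throws Exception {
        HServerLoad out = new HServerLoad(10, 128, 1024,
            new TreeMap<byte[], HServerLoad.RegionLoad>(Bytes.BYTES_COMPARATOR));
        ByteArrayOutputStream buf = new ByteArrayOutputStream();
        out.write(new DataOutputStream(buf));        // VERSION byte first, then the counters
        HServerLoad in = new HServerLoad();
        in.readFields(new DataInputStream(new ByteArrayInputStream(buf.toByteArray())));
        System.out.println(in.getNumberOfRegions()); // 0, derived from the region-load map size
      }
    }
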
diff --git a/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java b/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java
index 0d696ab..5bc3bb0 100644
--- a/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java
+++ b/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java
@@ -254,12 +254,10 @@ public class LocalHBaseCluster {
while (regionServerThread.isAlive()) {
try {
LOG.info("Waiting on " +
- regionServerThread.getRegionServer().getHServerInfo().toString());
+ regionServerThread.getRegionServer().toString());
regionServerThread.join();
} catch (InterruptedException e) {
e.printStackTrace();
- } catch (IOException e) {
- e.printStackTrace();
}
}
return regionServerThread.getName();
@@ -275,12 +273,10 @@ public class LocalHBaseCluster {
while (rst.isAlive()) {
try {
LOG.info("Waiting on " +
- rst.getRegionServer().getHServerInfo().toString());
+ rst.getRegionServer().toString());
rst.join();
} catch (InterruptedException e) {
e.printStackTrace();
- } catch (IOException e) {
- e.printStackTrace();
}
}
     for (int i=0;i<
- * <hostname> ',' <port> ',' <startcode>.
- * If the master, it returns <hostname> ':' <port>'.
- * @return unique server name
+ * @return The unique server name for this server.
*/
- public String getServerName();
+ public ServerName getServerName();
}
\ No newline at end of file
diff --git a/src/main/java/org/apache/hadoop/hbase/ServerName.java b/src/main/java/org/apache/hadoop/hbase/ServerName.java
new file mode 100644
index 0000000..cdbeb5b
--- /dev/null
+++ b/src/main/java/org/apache/hadoop/hbase/ServerName.java
@@ -0,0 +1,210 @@
+/**
+ * Copyright 2011 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase;
+
+import java.util.Set;
+
+import org.apache.hadoop.hbase.util.Addressing;
+import org.apache.hadoop.hbase.util.Bytes;
+
+/**
+ * Utility handling server names where server names are formatted
+ * <hostname> '{@link #SERVERNAME_SEPARATOR"}' <port> '{@ink #SERVERNAME_SEPARATOR"}' <startcode>.
+ * A server name is used uniquely identifying a server instance. The startcode
+ * distingushes restarted servers on same hostname and port (startcode is
+ * usually timestamp of server startup). The format is safe to use in the
+ * filesystem and as znode name up in ZooKeeper. For example, if hostname is
+ * example.org, port is 1234, and the startcode for the regionserver is
+ * 1212121212, then ServerName would be example.org,1234,1212121212.
+ * Immutable.
+ */
+public class ServerName implements Comparable<ServerName> {
+ /**
+ * This character is used as separator between server hostname, port and
+ * startcode.
+ */
+ public static final String SERVERNAME_SEPARATOR = ",";
+
+ private final String servername;
+ private final String hostname;
+ private final int port;
+ private final long startcode;
+
+ public ServerName(final String hostname, final int port, final long startcode) {
+ this.hostname = hostname;
+ this.port = port;
+ this.startcode = startcode;
+ this.servername = getServerName(hostname, port, startcode);
+ }
+
+ public ServerName(final String serverName) {
+ this(parseHostname(serverName), parsePort(serverName),
+ parseStartcode(serverName));
+ }
+
+ public ServerName(final String hostAndPort, final long startCode) {
+ this(Addressing.parseHostname(hostAndPort),
+ Addressing.parsePort(hostAndPort), startCode);
+ }
+
+ public static String parseHostname(final String serverName) {
+ int index = serverName.indexOf(SERVERNAME_SEPARATOR);
+ return serverName.substring(0, index);
+ }
+
+ public static int parsePort(final String serverName) {
+ String [] split = serverName.split(SERVERNAME_SEPARATOR);
+ return Integer.parseInt(split[1]);
+ }
+
+ public static long parseStartcode(final String serverName) {
+ int index = serverName.lastIndexOf(SERVERNAME_SEPARATOR);
+ return Long.parseLong(serverName.substring(index + 1));
+ }
+
+ @Override
+ public String toString() {
+ return getServerName();
+ }
+
+ public String getServerName() {
+ return servername;
+ }
+
+ public String getHostname() {
+ return hostname;
+ }
+
+ public int getPort() {
+ return port;
+ }
+
+ public long getStartcode() {
+ return startcode;
+ }
+
+ /**
+ * @param hostName
+ * @param port
+ * @param startcode
+ * @return Server name made of the concatenation of hostname, port and
+ * startcode formatted as <hostname> ',' <port> ',' <startcode>
+ */
+ public static String getServerName(String hostName, int port, long startcode) {
+ StringBuilder name = new StringBuilder(hostName);
+ name.append(SERVERNAME_SEPARATOR);
+ name.append(port);
+ name.append(SERVERNAME_SEPARATOR);
+ name.append(startcode);
+ return name.toString();
+ }
+
+ /**
+ * @param hostAndPort String in form of <hostname> ':' <port>
+ * @param startcode
+ * @return Server name made of the concatenation of hostname, port and
+ * startcode formatted as <hostname> ',' <port> ',' <startcode>
+ */
+ public static synchronized String getServerName(final String hostAndPort,
+ final long startcode) {
+ int index = hostAndPort.indexOf(":");
+    if (index <= 0) throw new IllegalArgumentException("Expected <hostname> ':' <port>");
+ return getServerName(hostAndPort.substring(0, index),
+ Integer.parseInt(hostAndPort.substring(index + 1)), startcode);
+ }
+
+ /**
+ * @return Hostname and port formatted as described at
+ * {@link Addressing#createHostAndPortStr(String, int)}
+ */
+ public String getHostAndPort() {
+ return Addressing.createHostAndPortStr(this.hostname, this.port);
+ }
+
+ /**
+ * @return This instance serialized to bytes.
+ */
+ public byte [] getBytes() {
+ return Bytes.toBytes(toString());
+ }
+
+ /**
+ * @param serverName ServerName in form specified by {@link #getServerName()}
+ * @return The server start code parsed from servername
+ */
+ public static long getServerStartcodeFromServerName(final String serverName) {
+ int index = serverName.lastIndexOf(SERVERNAME_SEPARATOR);
+ return Long.parseLong(serverName.substring(index + 1));
+ }
+
+ /**
+ * Utility method that does a find of a servername or a hostandport combination
+ * in the passed Set.
+ * @param servers Set of server names where server name is formatted as
+ * a {@link ServerName}.
+ * @return True if serverName found in servers
+ */
+  public boolean isServer(final Set<String> servers) {
+ return isServer(servers, toString(), false);
+ }
+
+ /**
+ * Utility method that does a find of a servername or a hostandport combination
+ * in the passed Set.
+ * @param servers Set of server names where server name is formatted as
+ * a {@link ServerName}.
+ * @param serverName Name to look for formatted as a {@link ServerName} or
+ * as a hostname ':' port.
+ * @param hostAndPortOnly True if passed serverName is a
+ * hostname ':' port, false if its formatted as a
+ * {@link ServerName}.
+ * @return True if serverName found in servers
+ */
+  public static boolean isServer(final Set<String> servers,
+ final String serverName, final boolean hostAndPortOnly) {
+ if (!hostAndPortOnly) return servers.contains(serverName);
+ String serverNameColonReplaced =
+ serverName.replaceFirst(":", SERVERNAME_SEPARATOR);
+ for (String hostPortStartCode: servers) {
+ int index = hostPortStartCode.lastIndexOf(SERVERNAME_SEPARATOR);
+ String hostPortStrippedOfStartCode = hostPortStartCode.substring(0, index);
+ if (hostPortStrippedOfStartCode.equals(serverNameColonReplaced)) return true;
+ }
+ return false;
+ }
+
+ @Override
+ public int compareTo(ServerName other) {
+ return this.servername.compareTo(other.getServerName());
+ }
+
+ @Override
+ public int hashCode() {
+ return this.servername.hashCode();
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null) return false;
+ if (!(o instanceof ServerName)) return false;
+ return this.compareTo((ServerName)o) == 0;
+ }
+}
\ No newline at end of file
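
A short sketch of how the new server name format round-trips through the parse helpers defined above (host, port, and startcode are made-up values):

    import org.apache.hadoop.hbase.ServerName;

    public class ServerNameExample {
      public static void main(String[] args) {
        ServerName sn = new ServerName("example.org", 1234, 1212121212L);
        String s = sn.getServerName();                    // example.org,1234,1212121212
        System.out.println(ServerName.parseHostname(s));  // example.org
        System.out.println(ServerName.parsePort(s));      // 1234
        System.out.println(ServerName.parseStartcode(s)); // 1212121212
        ServerName copy = new ServerName(s);               // the String constructor reverses getServerName()
        System.out.println(sn.equals(copy));              // true
        System.out.println(sn.getHostAndPort());          // example.org:1234
      }
    }
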
diff --git a/src/main/java/org/apache/hadoop/hbase/avro/AvroUtil.java b/src/main/java/org/apache/hadoop/hbase/avro/AvroUtil.java
index d7a1e67..df7e0f2 100644
--- a/src/main/java/org/apache/hadoop/hbase/avro/AvroUtil.java
+++ b/src/main/java/org/apache/hadoop/hbase/avro/AvroUtil.java
@@ -23,20 +23,17 @@ import java.nio.ByteBuffer;
import java.util.Collection;
import java.util.List;
+import org.apache.avro.Schema;
+import org.apache.avro.generic.GenericArray;
+import org.apache.avro.generic.GenericData;
+import org.apache.avro.util.Utf8;
import org.apache.hadoop.hbase.ClusterStatus;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HServerAddress;
-import org.apache.hadoop.hbase.HServerInfo;
import org.apache.hadoop.hbase.HServerLoad;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.client.Delete;
-import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.io.hfile.Compression;
-import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.avro.generated.AClusterStatus;
import org.apache.hadoop.hbase.avro.generated.AColumn;
import org.apache.hadoop.hbase.avro.generated.AColumnValue;
@@ -54,11 +51,13 @@ import org.apache.hadoop.hbase.avro.generated.AServerAddress;
import org.apache.hadoop.hbase.avro.generated.AServerInfo;
import org.apache.hadoop.hbase.avro.generated.AServerLoad;
import org.apache.hadoop.hbase.avro.generated.ATableDescriptor;
-
-import org.apache.avro.Schema;
-import org.apache.avro.generic.GenericArray;
-import org.apache.avro.generic.GenericData;
-import org.apache.avro.util.Utf8;
+import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.io.hfile.Compression;
+import org.apache.hadoop.hbase.util.Bytes;
public class AvroUtil {
@@ -113,13 +112,13 @@ public class AvroUtil {
return asl;
}
- static public AServerInfo hsiToASI(HServerInfo hsi) throws IOException {
+ static public AServerInfo hsiToASI(ServerName sn, HServerLoad hsl) throws IOException {
AServerInfo asi = new AServerInfo();
- asi.infoPort = hsi.getInfoPort();
- asi.load = hslToASL(hsi.getLoad());
- asi.serverAddress = hsaToASA(hsi.getServerAddress());
- asi.serverName = new Utf8(hsi.getServerName());
- asi.startCode = hsi.getStartCode();
+ asi.infoPort = -1;
+ asi.load = hslToASL(hsl);
+ asi.serverAddress = hsaToASA(new HServerAddress(sn.getHostname(), sn.getPort()));
+ asi.serverName = new Utf8(sn.toString());
+ asi.startCode = sn.getStartcode();
return asi;
}
@@ -142,19 +141,19 @@ public class AvroUtil {
acs.hbaseVersion = new Utf8(cs.getHBaseVersion());
acs.regionsCount = cs.getRegionsCount();
acs.requestsCount = cs.getRequestsCount();
- Collection hserverInfos = cs.getServerInfo();
+    Collection<ServerName> hserverInfos = cs.getServers();
Schema s = Schema.createArray(AServerInfo.SCHEMA$);
GenericData.Array aserverInfos = null;
if (hserverInfos != null) {
aserverInfos = new GenericData.Array(hserverInfos.size(), s);
- for (HServerInfo hsi : hserverInfos) {
- aserverInfos.add(hsiToASI(hsi));
+ for (ServerName hsi : hserverInfos) {
+ aserverInfos.add(hsiToASI(hsi, cs.getLoad(hsi)));
}
} else {
aserverInfos = new GenericData.Array(0, s);
}
acs.serverInfos = aserverInfos;
- acs.servers = cs.getServers();
+ acs.servers = cs.getServers().size();
return acs;
}
diff --git a/src/main/java/org/apache/hadoop/hbase/catalog/CatalogTracker.java b/src/main/java/org/apache/hadoop/hbase/catalog/CatalogTracker.java
index be31179..9522c89 100644
--- a/src/main/java/org/apache/hadoop/hbase/catalog/CatalogTracker.java
+++ b/src/main/java/org/apache/hadoop/hbase/catalog/CatalogTracker.java
@@ -33,6 +33,7 @@ import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HServerAddress;
import org.apache.hadoop.hbase.NotAllMetaRegionsOnlineException;
import org.apache.hadoop.hbase.NotServingRegionException;
+import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.RetriesExhaustedException;
import org.apache.hadoop.hbase.ipc.HRegionInterface;
@@ -68,7 +69,7 @@ public class CatalogTracker {
* server shutdown processing -- we need to know who had .META. last. If you
* want to know if the address is good, rely on {@link #metaAvailable} value.
*/
- private HServerAddress metaLocation;
+ private ServerName metaLocation;
private final int defaultTimeout;
private boolean stopped = false;
@@ -155,17 +156,18 @@ public class CatalogTracker {
/**
* Gets the current location for -ROOT- or null if location is
* not currently available.
- * @return location of root, null if not available
+ * @return server name
* @throws InterruptedException
*/
- public HServerAddress getRootLocation() throws InterruptedException {
+ public ServerName getRootLocation() throws InterruptedException {
return this.rootRegionTracker.getRootRegionLocation();
}
/**
- * @return Location of meta or null if not yet available.
+ * @return Location of server hosting meta region formatted as per
+ * {@link ServerName}, or null if none available
*/
- public HServerAddress getMetaLocation() {
+ public ServerName getMetaLocation() {
return this.metaLocation;
}
@@ -184,18 +186,19 @@ public class CatalogTracker {
* for up to the specified timeout if not immediately available. Returns null
* if the timeout elapses before root is available.
* @param timeout maximum time to wait for root availability, in milliseconds
- * @return location of root
+ * @return Location of server hosting root region,
+ * or null if none available
* @throws InterruptedException if interrupted while waiting
* @throws NotAllMetaRegionsOnlineException if root not available before
* timeout
*/
- HServerAddress waitForRoot(final long timeout)
+ ServerName waitForRoot(final long timeout)
throws InterruptedException, NotAllMetaRegionsOnlineException {
- HServerAddress address = rootRegionTracker.waitRootRegionLocation(timeout);
- if (address == null) {
+ ServerName sn = rootRegionTracker.waitRootRegionLocation(timeout);
+ if (sn == null) {
throw new NotAllMetaRegionsOnlineException("Timed out; " + timeout + "ms");
}
- return address;
+ return sn;
}
/**
@@ -238,11 +241,11 @@ public class CatalogTracker {
*/
private HRegionInterface getRootServerConnection()
throws IOException, InterruptedException {
- HServerAddress address = this.rootRegionTracker.getRootRegionLocation();
- if (address == null) {
+ ServerName sn = this.rootRegionTracker.getRootRegionLocation();
+ if (sn == null) {
return null;
}
- return getCachedConnection(address);
+ return getCachedConnection(sn);
}
/**
@@ -278,7 +281,7 @@ public class CatalogTracker {
if (rootConnection == null) {
return null;
}
- HServerAddress newLocation = MetaReader.readMetaLocation(rootConnection);
+ ServerName newLocation = MetaReader.readMetaLocation(rootConnection);
if (newLocation == null) {
return null;
}
@@ -317,7 +320,7 @@ public class CatalogTracker {
* @throws NotAllMetaRegionsOnlineException if meta not available before
* timeout
*/
- public HServerAddress waitForMeta(long timeout)
+ public ServerName waitForMeta(long timeout)
throws InterruptedException, IOException, NotAllMetaRegionsOnlineException {
long stop = System.currentTimeMillis() + timeout;
synchronized (metaAvailable) {
@@ -372,18 +375,21 @@ public class CatalogTracker {
this.metaAvailable.set(false);
}
- private void setMetaLocation(HServerAddress metaLocation) {
+ private void setMetaLocation(final ServerName metaLocation) {
metaAvailable.set(true);
this.metaLocation = metaLocation;
// no synchronization because these are private and already under lock
- metaAvailable.notifyAll();
+ this.metaAvailable.notifyAll();
}
- private HRegionInterface getCachedConnection(HServerAddress address)
+ private HRegionInterface getCachedConnection(ServerName sn)
throws IOException {
HRegionInterface protocol = null;
try {
- protocol = connection.getHRegionConnection(address, false);
+ // TODO: Remove. Its silly making an HSA. Just pass host and port and
+ // let connection figure it out.
+ HServerAddress hsa = new HServerAddress(sn.getHostname(), sn.getPort());
+ protocol = connection.getHRegionConnection(hsa, false);
} catch (RetriesExhaustedException e) {
if (e.getCause() != null && e.getCause() instanceof ConnectException) {
// Catch this; presume it means the cached connection has gone bad.
@@ -392,10 +398,10 @@ public class CatalogTracker {
}
} catch (SocketTimeoutException e) {
// Return 'protocol' == null.
- LOG.debug("Timed out connecting to " + address);
+ LOG.debug("Timed out connecting to " + sn);
} catch (SocketException e) {
// Return 'protocol' == null.
- LOG.debug("Exception connecting to " + address);
+ LOG.debug("Exception connecting to " + sn);
} catch (IOException ioe) {
Throwable cause = ioe.getCause();
if (cause != null && cause instanceof EOFException) {
@@ -412,7 +418,7 @@ public class CatalogTracker {
}
private boolean verifyRegionLocation(HRegionInterface metaServer,
- final HServerAddress address,
+ final ServerName address,
byte [] regionName)
throws IOException {
if (metaServer == null) {
@@ -469,7 +475,8 @@ public class CatalogTracker {
throw e;
}
return (connection == null)? false:
- verifyRegionLocation(connection,this.rootRegionTracker.getRootRegionLocation(),
+ verifyRegionLocation(connection,
+ this.rootRegionTracker.getRootRegionLocation(),
HRegionInfo.ROOT_REGIONINFO.getRegionName());
}
diff --git a/src/main/java/org/apache/hadoop/hbase/catalog/MetaEditor.java b/src/main/java/org/apache/hadoop/hbase/catalog/MetaEditor.java
index c2ee031..5d61607 100644
--- a/src/main/java/org/apache/hadoop/hbase/catalog/MetaEditor.java
+++ b/src/main/java/org/apache/hadoop/hbase/catalog/MetaEditor.java
@@ -26,8 +26,8 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HServerInfo;
import org.apache.hadoop.hbase.NotAllMetaRegionsOnlineException;
+import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.ipc.HRegionInterface;
@@ -87,18 +87,17 @@ public class MetaEditor {
}
public static void addDaughter(final CatalogTracker catalogTracker,
- final HRegionInfo regionInfo, final HServerInfo serverInfo)
+ final HRegionInfo regionInfo, final ServerName sn)
throws NotAllMetaRegionsOnlineException, IOException {
HRegionInterface server = catalogTracker.waitForMetaServerConnectionDefault();
byte [] catalogRegionName = CatalogTracker.META_REGION;
Put put = new Put(regionInfo.getRegionName());
addRegionInfo(put, regionInfo);
- if (serverInfo != null) addLocation(put, serverInfo);
+ if (sn != null) addLocation(put, sn);
server.put(catalogRegionName, put);
LOG.info("Added daughter " + regionInfo.getRegionNameAsString() +
" in region " + Bytes.toString(catalogRegionName) +
- (serverInfo == null?
- ", serverInfo=null": ", serverInfo=" + serverInfo.getServerName()));
+ (sn == null? ", serverName=null": ", serverName=" + sn.toString()));
}
/**
@@ -110,18 +109,18 @@ public class MetaEditor {
*
* @param catalogTracker catalog tracker
* @param regionInfo region to update location of
- * @param serverInfo server the region is located on
+ * @param sn Server name
* @throws IOException
* @throws ConnectException Usually because the regionserver carrying .META.
* is down.
* @throws NullPointerException Because no -ROOT- server connection
*/
public static void updateMetaLocation(CatalogTracker catalogTracker,
- HRegionInfo regionInfo, HServerInfo serverInfo)
+ HRegionInfo regionInfo, ServerName sn)
throws IOException, ConnectException {
HRegionInterface server = catalogTracker.waitForRootServerConnectionDefault();
if (server == null) throw new IOException("No server for -ROOT-");
- updateLocation(server, CatalogTracker.ROOT_REGION, regionInfo, serverInfo);
+ updateLocation(server, CatalogTracker.ROOT_REGION, regionInfo, sn);
}
/**
@@ -133,14 +132,14 @@ public class MetaEditor {
*
* @param catalogTracker catalog tracker
* @param regionInfo region to update location of
- * @param serverInfo server the region is located on
+ * @param sn Server name
* @throws IOException
*/
public static void updateRegionLocation(CatalogTracker catalogTracker,
- HRegionInfo regionInfo, HServerInfo serverInfo)
+ HRegionInfo regionInfo, ServerName sn)
throws IOException {
updateLocation(catalogTracker.waitForMetaServerConnectionDefault(),
- CatalogTracker.META_REGION, regionInfo, serverInfo);
+ CatalogTracker.META_REGION, regionInfo, sn);
}
/**
@@ -152,20 +151,19 @@ public class MetaEditor {
* @param server connection to server hosting catalog region
* @param catalogRegionName name of catalog region being updated
* @param regionInfo region to update location of
- * @param serverInfo server the region is located on
+ * @param sn Server name
* @throws IOException In particular could throw {@link java.net.ConnectException}
* if the server is down on other end.
*/
private static void updateLocation(HRegionInterface server,
- byte [] catalogRegionName, HRegionInfo regionInfo, HServerInfo serverInfo)
+ byte [] catalogRegionName, HRegionInfo regionInfo, ServerName sn)
throws IOException {
Put put = new Put(regionInfo.getRegionName());
- addLocation(put, serverInfo);
+ addLocation(put, sn);
server.put(catalogRegionName, put);
LOG.info("Updated row " + regionInfo.getRegionNameAsString() +
" in region " + Bytes.toString(catalogRegionName) + " with " +
- "server=" + serverInfo.getHostnamePort() + ", " +
- "startcode=" + serverInfo.getStartCode());
+ "serverName=" + sn.toString());
}
/**
@@ -228,11 +226,11 @@ public class MetaEditor {
return p;
}
- private static Put addLocation(final Put p, final HServerInfo hsi) {
+ private static Put addLocation(final Put p, final ServerName sn) {
p.add(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER,
- Bytes.toBytes(hsi.getHostnamePort()));
+ Bytes.toBytes(sn.getHostAndPort()));
p.add(HConstants.CATALOG_FAMILY, HConstants.STARTCODE_QUALIFIER,
- Bytes.toBytes(hsi.getStartCode()));
+ Bytes.toBytes(sn.getStartcode()));
return p;
}
}
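
For reference, addLocation() above persists a region's location as two cells in the catalog family: the server cell holds the host ':' port string and the startcode cell holds the startcode as a long. A minimal round-trip sketch using only the ServerName accessors that appear in this patch; the class name and example values are illustrative:

    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CatalogLocationSketch {
      public static void main(String[] args) {
        ServerName sn = new ServerName("host187.example.com", 60020, 1289493121758L);
        // What addLocation() writes into the two catalog cells.
        byte [] serverCell = Bytes.toBytes(sn.getHostAndPort());   // "host187.example.com:60020"
        byte [] startcodeCell = Bytes.toBytes(sn.getStartcode());  // the startcode as 8 bytes
        // The inverse, as done when reading the row back (see MetaReader below).
        ServerName roundTripped =
          new ServerName(Bytes.toString(serverCell), Bytes.toLong(startcodeCell));
        System.out.println(roundTripped.equals(sn));  // expect true
      }
    }
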
diff --git a/src/main/java/org/apache/hadoop/hbase/catalog/MetaReader.java b/src/main/java/org/apache/hadoop/hbase/catalog/MetaReader.java
index 6e22cf5..eb57197 100644
--- a/src/main/java/org/apache/hadoop/hbase/catalog/MetaReader.java
+++ b/src/main/java/org/apache/hadoop/hbase/catalog/MetaReader.java
@@ -30,11 +30,10 @@ import java.util.TreeSet;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HServerAddress;
-import org.apache.hadoop.hbase.HServerInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.NotAllMetaRegionsOnlineException;
import org.apache.hadoop.hbase.NotServingRegionException;
+import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
@@ -125,10 +124,11 @@ public class MetaReader {
* to META. If the region does not have an assignment it will have a null
* value in the map.
*
- * @return map of regions to their currently assigned server
+ * @return map of regions to their currently assigned server, the server given
+ * as a {@link ServerName} (null value if the region is unassigned)
* @throws IOException
*/
- public static Map<HRegionInfo, HServerAddress> fullScan(
+ public static Map<HRegionInfo, ServerName> fullScan(
CatalogTracker catalogTracker)
throws IOException {
return fullScan(catalogTracker, new TreeSet<String>());
@@ -147,7 +147,7 @@ public class MetaReader {
* @return map of regions to their currently assigned server
* @throws IOException
*/
- public static Map<HRegionInfo, HServerAddress> fullScan(
+ public static Map<HRegionInfo, ServerName> fullScan(
CatalogTracker catalogTracker, final Set<String> disabledTables)
throws IOException {
return fullScan(catalogTracker, disabledTables, false);
@@ -168,17 +168,17 @@ public class MetaReader {
* @return map of regions to their currently assigned server
* @throws IOException
*/
- public static Map<HRegionInfo, HServerAddress> fullScan(
+ public static Map<HRegionInfo, ServerName> fullScan(
CatalogTracker catalogTracker, final Set<String> disabledTables,
final boolean excludeOfflinedSplitParents)
throws IOException {
- final Map<HRegionInfo, HServerAddress> regions =
- new TreeMap<HRegionInfo, HServerAddress>();
+ final Map<HRegionInfo, ServerName> regions =
+ new TreeMap<HRegionInfo, ServerName>();
Visitor v = new Visitor() {
@Override
public boolean visit(Result r) throws IOException {
if (r == null || r.isEmpty()) return true;
- Pair<HRegionInfo, HServerAddress> region = metaRowToRegionPair(r);
+ Pair<HRegionInfo, ServerName> region = metaRowToRegionPair(r);
if (region == null) return true;
HRegionInfo hri = region.getFirst();
if (disabledTables.contains(
@@ -199,8 +199,6 @@ public class MetaReader {
* Returns a map of every region to its currently assigned server, according
* to META. If the region does not have an assignment it will have a null
* value in the map.
- *
- * Returns HServerInfo which includes server startcode.
*
* @return map of regions to their currently assigned server
* @throws IOException
@@ -273,10 +271,10 @@ public class MetaReader {
/**
* Reads the location of META from ROOT.
* @param metaServer connection to server hosting ROOT
- * @return location of META in ROOT, null if not available
+ * @return location of META in ROOT as a {@link ServerName}, or null if not available
* @throws IOException
*/
- public static HServerAddress readMetaLocation(HRegionInterface metaServer)
+ public static ServerName readMetaLocation(HRegionInterface metaServer)
throws IOException {
return readLocation(metaServer, CatalogTracker.ROOT_REGION,
CatalogTracker.META_REGION);
@@ -286,10 +284,10 @@ public class MetaReader {
* Reads the location of the specified region from META.
* @param catalogTracker
* @param regionName region to read location of
- * @return location of region in META, null if not available
+ * @return location of the region as a {@link ServerName}, or null if not available
* @throws IOException
*/
- public static HServerAddress readRegionLocation(CatalogTracker catalogTracker,
+ public static ServerName readRegionLocation(CatalogTracker catalogTracker,
byte [] regionName)
throws IOException {
if (isMetaRegion(regionName)) throw new IllegalArgumentException("See readMetaLocation");
@@ -297,14 +295,17 @@ public class MetaReader {
CatalogTracker.META_REGION, regionName);
}
- private static HServerAddress readLocation(HRegionInterface metaServer,
+ private static ServerName readLocation(HRegionInterface metaServer,
byte [] catalogRegionName, byte [] regionName)
throws IOException {
Result r = null;
try {
r = metaServer.get(catalogRegionName,
- new Get(regionName).addColumn(HConstants.CATALOG_FAMILY,
- HConstants.SERVER_QUALIFIER));
+ new Get(regionName).
+ addColumn(HConstants.CATALOG_FAMILY,
+ HConstants.SERVER_QUALIFIER).
+ addColumn(HConstants.CATALOG_FAMILY,
+ HConstants.STARTCODE_QUALIFIER));
} catch (java.net.SocketTimeoutException e) {
// Treat this exception + message as unavailable catalog table. Catch it
// and fall through to return a null
@@ -334,78 +335,57 @@ public class MetaReader {
if (r == null || r.isEmpty()) {
return null;
}
- byte [] value = r.getValue(HConstants.CATALOG_FAMILY,
- HConstants.SERVER_QUALIFIER);
- return new HServerAddress(Bytes.toString(value));
+ return getServerNameFromResult(r);
}
/**
* Gets the region info and assignment for the specified region from META.
* @param catalogTracker
* @param regionName
- * @return region info and assignment from META, null if not available
+ * @return region info and assignment (as a {@link ServerName}) from META,
+ * or null if not available
* @throws IOException
*/
- public static Pair<HRegionInfo, HServerAddress> getRegion(
+ public static Pair<HRegionInfo, ServerName> getRegion(
CatalogTracker catalogTracker, byte [] regionName)
throws IOException {
Get get = new Get(regionName);
get.addFamily(HConstants.CATALOG_FAMILY);
byte [] meta = getCatalogRegionNameForRegion(regionName);
Result r = catalogTracker.waitForMetaServerConnectionDefault().get(meta, get);
- if(r == null || r.isEmpty()) {
- return null;
- }
- return metaRowToRegionPair(r);
+ return (r == null || r.isEmpty())? null: metaRowToRegionPair(r);
}
/**
* @param data A .META. table row.
- * @return A pair of the regioninfo and the server address from data
- * or null for server address if no address set in .META. or null for a result
- * if no HRegionInfo found.
+ * @return A pair of the regioninfo and the ServerName
+ * (or null for the ServerName if no server is listed in .META.).
* @throws IOException
*/
- public static Pair<HRegionInfo, HServerAddress> metaRowToRegionPair(
- Result data) throws IOException {
- byte [] bytes =
- data.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
+ public static Pair<HRegionInfo, ServerName> metaRowToRegionPair(Result data)
+ throws IOException {
+ byte [] bytes = data.getValue(HConstants.CATALOG_FAMILY,
+ HConstants.REGIONINFO_QUALIFIER);
if (bytes == null) return null;
HRegionInfo info = Writables.getHRegionInfo(bytes);
- final byte[] value = data.getValue(HConstants.CATALOG_FAMILY,
- HConstants.SERVER_QUALIFIER);
- if (value != null && value.length > 0) {
- HServerAddress server = new HServerAddress(Bytes.toString(value));
- return new Pair<HRegionInfo, HServerAddress>(info, server);
- } else {
- return new Pair<HRegionInfo, HServerAddress>(info, null);
- }
+ ServerName sn = getServerNameFromResult(data);
+ // sn can be null if the row carries no server info.
+ return new Pair<HRegionInfo, ServerName>(info, sn);
}
/**
- * @param data A .META. table row.
- * @return A pair of the regioninfo and the server info from data
- * (or null for server address if no address set in .META.).
- * @throws IOException
+ * @param data Result to interrogate.
+ * @return A ServerName instance or null if necessary fields not found or empty.
*/
- public static Pair<HRegionInfo, HServerInfo> metaRowToRegionPairWithInfo(
- Result data) throws IOException {
- byte [] bytes = data.getValue(HConstants.CATALOG_FAMILY,
- HConstants.REGIONINFO_QUALIFIER);
- if (bytes == null) return null;
- HRegionInfo info = Writables.getHRegionInfo(bytes);
- final byte[] value = data.getValue(HConstants.CATALOG_FAMILY,
+ private static ServerName getServerNameFromResult(final Result data) {
+ byte[] value = data.getValue(HConstants.CATALOG_FAMILY,
HConstants.SERVER_QUALIFIER);
- if (value != null && value.length > 0) {
- final long startCode = Bytes.toLong(data.getValue(HConstants.CATALOG_FAMILY,
- HConstants.STARTCODE_QUALIFIER));
- HServerAddress server = new HServerAddress(Bytes.toString(value));
- HServerInfo hsi = new HServerInfo(server, startCode, 0,
- server.getHostname());
- return new Pair<HRegionInfo, HServerInfo>(info, hsi);
- } else {
- return new Pair<HRegionInfo, HServerInfo>(info, null);
- }
+ if (value == null || value.length == 0) return null;
+ String hostAndPort = Bytes.toString(value);
+ value = data.getValue(HConstants.CATALOG_FAMILY,
+ HConstants.STARTCODE_QUALIFIER);
+ if (value == null || value.length == 0) return null;
+ return new ServerName(hostAndPort, Bytes.toLong(value));
}
/**
@@ -528,26 +508,27 @@ public class MetaReader {
/**
* @param catalogTracker
* @param tableName
- * @return Return list of regioninfos and server addresses.
+ * @return Return list of regioninfos and their server names.
* @throws IOException
* @throws InterruptedException
*/
- public static List<Pair<HRegionInfo, HServerAddress>>
+ public static List<Pair<HRegionInfo, ServerName>>
getTableRegionsAndLocations(CatalogTracker catalogTracker, String tableName)
throws IOException, InterruptedException {
byte [] tableNameBytes = Bytes.toBytes(tableName);
if (Bytes.equals(tableNameBytes, HConstants.ROOT_TABLE_NAME)) {
// If root, do a bit of special handling.
- HServerAddress hsa = catalogTracker.getRootLocation();
- List<Pair<HRegionInfo, HServerAddress>> list =
- new ArrayList<Pair<HRegionInfo, HServerAddress>>();
- list.add(new Pair<HRegionInfo, HServerAddress>(HRegionInfo.ROOT_REGIONINFO, hsa));
+ ServerName serverName = catalogTracker.getRootLocation();
+ List<Pair<HRegionInfo, ServerName>> list =
+ new ArrayList<Pair<HRegionInfo, ServerName>>();
+ list.add(new Pair<HRegionInfo, ServerName>(HRegionInfo.ROOT_REGIONINFO,
+ serverName));
return list;
}
HRegionInterface metaServer =
getCatalogRegionInterface(catalogTracker, tableNameBytes);
- List<Pair<HRegionInfo, HServerAddress>> regions =
- new ArrayList<Pair<HRegionInfo, HServerAddress>>();
+ List<Pair<HRegionInfo, ServerName>> regions =
+ new ArrayList<Pair<HRegionInfo, ServerName>>();
Scan scan = getScanForTableName(tableNameBytes);
scan.addFamily(HConstants.CATALOG_FAMILY);
long scannerid =
@@ -556,7 +537,7 @@ public class MetaReader {
Result data;
while((data = metaServer.next(scannerid)) != null) {
if (data != null && data.size() > 0) {
- Pair<HRegionInfo, HServerAddress> region = metaRowToRegionPair(data);
+ Pair<HRegionInfo, ServerName> region = metaRowToRegionPair(data);
if (region == null) continue;
regions.add(region);
}
@@ -575,7 +556,7 @@ public class MetaReader {
* @throws IOException
*/
public static NavigableMap<HRegionInfo, Result>
- getServerUserRegions(CatalogTracker catalogTracker, final HServerInfo hsi)
+ getServerUserRegions(CatalogTracker catalogTracker, final ServerName serverName)
throws IOException {
HRegionInterface metaServer =
catalogTracker.waitForMetaServerConnectionDefault();
@@ -588,10 +569,9 @@ public class MetaReader {
Result result;
while((result = metaServer.next(scannerid)) != null) {
if (result != null && result.size() > 0) {
- Pair<HRegionInfo, HServerInfo> pair =
- metaRowToRegionPairWithInfo(result);
+ Pair<HRegionInfo, ServerName> pair = metaRowToRegionPair(result);
if (pair == null) continue;
- if (pair.getSecond() == null || !pair.getSecond().equals(hsi)) {
+ if (pair.getSecond() == null || !serverName.equals(pair.getSecond())) {
continue;
}
hris.put(pair.getFirst(), result);
diff --git a/src/main/java/org/apache/hadoop/hbase/catalog/RootLocationEditor.java b/src/main/java/org/apache/hadoop/hbase/catalog/RootLocationEditor.java
index aee64c5..1cbf1b6 100644
--- a/src/main/java/org/apache/hadoop/hbase/catalog/RootLocationEditor.java
+++ b/src/main/java/org/apache/hadoop/hbase/catalog/RootLocationEditor.java
@@ -21,7 +21,7 @@ package org.apache.hadoop.hbase.catalog;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.HServerAddress;
+import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
@@ -53,11 +53,11 @@ public class RootLocationEditor {
* Sets the location of -ROOT- in ZooKeeper to the
* specified server address.
* @param zookeeper zookeeper reference
- * @param location server address hosting root
+ * @param location The server hosting -ROOT-
* @throws KeeperException unexpected zookeeper exception
*/
public static void setRootLocation(ZooKeeperWatcher zookeeper,
- HServerAddress location)
+ final ServerName location)
throws KeeperException {
LOG.info("Setting ROOT region location in ZooKeeper as " + location);
try {
@@ -69,4 +69,4 @@ public class RootLocationEditor {
Bytes.toBytes(location.toString()));
}
}
-}
+}
\ No newline at end of file
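
setRootLocation() above stores the ServerName's toString() form in the -ROOT- location znode, so readers rebuild the ServerName from that string with the String constructor used elsewhere in this patch (e.g. in ActiveMasterManager). A small sketch of that round trip; the class name and values are illustrative:

    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RootZNodeSketch {
      public static void main(String[] args) {
        ServerName location = new ServerName("rs1.example.com", 60020, 1289493121758L);
        // What setRootLocation() writes into the znode.
        byte [] znodeData = Bytes.toBytes(location.toString());
        // What a reader of the znode does to get the ServerName back.
        ServerName parsed = new ServerName(Bytes.toString(znodeData));
        System.out.println(parsed.equals(location));  // expect true
      }
    }
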
diff --git a/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
index e022fe4..1586e7a 100644
--- a/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
+++ b/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
@@ -39,6 +39,7 @@ import org.apache.hadoop.hbase.MasterNotRunningException;
import org.apache.hadoop.hbase.NotServingRegionException;
import org.apache.hadoop.hbase.RegionException;
import org.apache.hadoop.hbase.RemoteExceptionHandler;
+import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableExistsException;
import org.apache.hadoop.hbase.UnknownRegionException;
import org.apache.hadoop.hbase.ZooKeeperConnectionException;
@@ -763,18 +764,15 @@ public class HBaseAdmin implements Abortable {
CatalogTracker ct = getCatalogTracker();
try {
if (hostAndPort != null) {
- HServerAddress hsa = new HServerAddress(hostAndPort);
- Pair<HRegionInfo, HServerAddress> pair =
- MetaReader.getRegion(ct, regionname);
+ Pair<HRegionInfo, ServerName> pair = MetaReader.getRegion(ct, regionname);
if (pair == null || pair.getSecond() == null) {
LOG.info("No server in .META. for " +
Bytes.toString(regionname) + "; pair=" + pair);
} else {
- closeRegion(hsa, pair.getFirst());
+ closeRegion(pair.getSecond(), pair.getFirst());
}
} else {
- Pair<HRegionInfo, HServerAddress> pair =
- MetaReader.getRegion(ct, regionname);
+ Pair<HRegionInfo, ServerName> pair = MetaReader.getRegion(ct, regionname);
if (pair == null || pair.getSecond() == null) {
LOG.info("No server in .META. for " +
Bytes.toString(regionname) + "; pair=" + pair);
@@ -787,8 +785,9 @@ public class HBaseAdmin implements Abortable {
}
}
- private void closeRegion(final HServerAddress hsa, final HRegionInfo hri)
+ private void closeRegion(final ServerName sn, final HRegionInfo hri)
throws IOException {
+ HServerAddress hsa = new HServerAddress(sn.getHostname(), sn.getPort());
HRegionInterface rs = this.connection.getHRegionConnection(hsa);
// Close the region without updating zk state.
rs.closeRegion(hri, false);
@@ -821,7 +820,7 @@ public class HBaseAdmin implements Abortable {
CatalogTracker ct = getCatalogTracker();
try {
if (isRegionName) {
- Pair<HRegionInfo, HServerAddress> pair =
+ Pair<HRegionInfo, ServerName> pair =
MetaReader.getRegion(getCatalogTracker(), tableNameOrRegionName);
if (pair == null || pair.getSecond() == null) {
LOG.info("No server in .META. for " +
@@ -830,10 +829,10 @@ public class HBaseAdmin implements Abortable {
flush(pair.getSecond(), pair.getFirst());
}
} else {
- List<Pair<HRegionInfo, HServerAddress>> pairs =
+ List<Pair<HRegionInfo, ServerName>> pairs =
MetaReader.getTableRegionsAndLocations(getCatalogTracker(),
Bytes.toString(tableNameOrRegionName));
- for (Pair<HRegionInfo, HServerAddress> pair: pairs) {
+ for (Pair<HRegionInfo, ServerName> pair: pairs) {
if (pair.getFirst().isOffline()) continue;
if (pair.getSecond() == null) continue;
try {
@@ -851,8 +850,9 @@ public class HBaseAdmin implements Abortable {
}
}
- private void flush(final HServerAddress hsa, final HRegionInfo hri)
+ private void flush(final ServerName sn, final HRegionInfo hri)
throws IOException {
+ HServerAddress hsa = new HServerAddress(sn.getHostname(), sn.getPort());
HRegionInterface rs = this.connection.getHRegionConnection(hsa);
rs.flushRegion(hri);
}
@@ -923,7 +923,7 @@ public class HBaseAdmin implements Abortable {
CatalogTracker ct = getCatalogTracker();
try {
if (isRegionName(tableNameOrRegionName)) {
- Pair<HRegionInfo, HServerAddress> pair =
+ Pair<HRegionInfo, ServerName> pair =
MetaReader.getRegion(ct, tableNameOrRegionName);
if (pair == null || pair.getSecond() == null) {
LOG.info("No server in .META. for " +
@@ -932,10 +932,10 @@ public class HBaseAdmin implements Abortable {
compact(pair.getSecond(), pair.getFirst(), major);
}
} else {
- List<Pair<HRegionInfo, HServerAddress>> pairs =
+ List<Pair<HRegionInfo, ServerName>> pairs =
MetaReader.getTableRegionsAndLocations(ct,
Bytes.toString(tableNameOrRegionName));
- for (Pair<HRegionInfo, HServerAddress> pair: pairs) {
+ for (Pair<HRegionInfo, ServerName> pair: pairs) {
if (pair.getFirst().isOffline()) continue;
if (pair.getSecond() == null) continue;
try {
@@ -954,9 +954,10 @@ public class HBaseAdmin implements Abortable {
}
}
- private void compact(final HServerAddress hsa, final HRegionInfo hri,
+ private void compact(final ServerName sn, final HRegionInfo hri,
final boolean major)
throws IOException {
+ HServerAddress hsa = new HServerAddress(sn.getHostname(), sn.getPort());
HRegionInterface rs = this.connection.getHRegionConnection(hsa);
rs.compactRegion(hri, major);
}
@@ -970,7 +971,7 @@ public class HBaseAdmin implements Abortable {
* @param destServerName The servername of the destination regionserver. If
* passed the empty byte array we'll assign to a random server. A server name
* is made of host, port and startcode. Here is an example:
- * host187.example.com,60020,1289493121758.
+ * host187.example.com,60020,1289493121758
* @throws UnknownRegionException Thrown if we can't find a region named
* encodedRegionName
* @throws ZooKeeperConnectionException
@@ -1078,7 +1079,7 @@ public class HBaseAdmin implements Abortable {
try {
if (isRegionName(tableNameOrRegionName)) {
// Its a possible region name.
- Pair<HRegionInfo, HServerAddress> pair =
+ Pair<HRegionInfo, ServerName> pair =
MetaReader.getRegion(getCatalogTracker(), tableNameOrRegionName);
if (pair == null || pair.getSecond() == null) {
LOG.info("No server in .META. for " +
@@ -1087,10 +1088,10 @@ public class HBaseAdmin implements Abortable {
split(pair.getSecond(), pair.getFirst(), splitPoint);
}
} else {
- List<Pair<HRegionInfo, HServerAddress>> pairs =
+ List<Pair<HRegionInfo, ServerName>> pairs =
MetaReader.getTableRegionsAndLocations(getCatalogTracker(),
Bytes.toString(tableNameOrRegionName));
- for (Pair<HRegionInfo, HServerAddress> pair: pairs) {
+ for (Pair<HRegionInfo, ServerName> pair: pairs) {
// May not be a server for a particular row
if (pair.getSecond() == null) continue;
HRegionInfo r = pair.getFirst();
@@ -1107,8 +1108,9 @@ public class HBaseAdmin implements Abortable {
}
}
- private void split(final HServerAddress hsa, final HRegionInfo hri,
+ private void split(final ServerName sn, final HRegionInfo hri,
byte[] splitPoint) throws IOException {
+ HServerAddress hsa = new HServerAddress(sn.getHostname(), sn.getPort());
HRegionInterface rs = this.connection.getHRegionConnection(hsa);
rs.splitRegion(hri, splitPoint);
}
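
closeRegion(), flush(), compact() and split() above all repeat the same bridge from the new ServerName argument to the HServerAddress that getHRegionConnection() still expects. A hedged sketch of that bridge pulled out as a helper; the helper and class names are illustrative, not from the patch:

    import org.apache.hadoop.hbase.HServerAddress;
    import org.apache.hadoop.hbase.ServerName;

    public class ServerNameBridgeSketch {
      // Same conversion done inline in closeRegion()/flush()/compact()/split().
      static HServerAddress toHServerAddress(ServerName sn) {
        return new HServerAddress(sn.getHostname(), sn.getPort());
      }

      public static void main(String[] args) {
        ServerName sn = new ServerName("rs1.example.com", 60020, 1289493121758L);
        System.out.println(toHServerAddress(sn));  // host and port only; the startcode is dropped
      }
    }
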
diff --git a/src/main/java/org/apache/hadoop/hbase/client/HConnection.java b/src/main/java/org/apache/hadoop/hbase/client/HConnection.java
index d8a2fc3..581ca0a 100644
--- a/src/main/java/org/apache/hadoop/hbase/client/HConnection.java
+++ b/src/main/java/org/apache/hadoop/hbase/client/HConnection.java
@@ -199,8 +199,8 @@ public interface HConnection extends Abortable {
* @return proxy for HRegionServer
* @throws IOException if a remote or network exception occurs
*/
- public HRegionInterface getHRegionConnection(
- HServerAddress regionServer, boolean getMaster)
+ public HRegionInterface getHRegionConnection(HServerAddress regionServer,
+ boolean getMaster)
throws IOException;
/**
diff --git a/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java b/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java
index 53decd6..0c906b0 100644
--- a/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java
+++ b/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java
@@ -22,8 +22,16 @@ package org.apache.hadoop.hbase.client;
import java.io.IOException;
import java.lang.reflect.Proxy;
import java.lang.reflect.UndeclaredThrowableException;
-import java.util.*;
+import java.net.InetSocketAddress;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
import java.util.Map.Entry;
+import java.util.Set;
+import java.util.TreeMap;
+import java.util.TreeSet;
import java.util.concurrent.Callable;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CopyOnWriteArraySet;
@@ -46,11 +54,17 @@ import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.MasterAddressTracker;
import org.apache.hadoop.hbase.MasterNotRunningException;
import org.apache.hadoop.hbase.RemoteExceptionHandler;
+import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.ZooKeeperConnectionException;
import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitor;
import org.apache.hadoop.hbase.client.coprocessor.Batch;
-import org.apache.hadoop.hbase.ipc.*;
+import org.apache.hadoop.hbase.ipc.CoprocessorProtocol;
+import org.apache.hadoop.hbase.ipc.ExecRPCInvoker;
+import org.apache.hadoop.hbase.ipc.HBaseRPC;
+import org.apache.hadoop.hbase.ipc.HMasterInterface;
+import org.apache.hadoop.hbase.ipc.HRegionInterface;
+import org.apache.hadoop.hbase.util.Addressing;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.util.SoftValueSortedMap;
@@ -339,7 +353,7 @@ public class HConnectionManager {
}
}
- HServerAddress masterLocation = null;
+ ServerName sn = null;
synchronized (this.masterLock) {
for (int tries = 0;
!this.closed &&
@@ -348,8 +362,8 @@ public class HConnectionManager {
tries++) {
try {
- masterLocation = masterAddressTracker.getMasterAddress();
- if(masterLocation == null) {
+ sn = masterAddressTracker.getMasterAddress();
+ if (sn == null) {
LOG.info("ZooKeeper available but no active master location found");
throw new MasterNotRunningException();
}
@@ -357,9 +371,11 @@ public class HConnectionManager {
if (clusterId.hasId()) {
conf.set(HConstants.CLUSTER_ID, clusterId.getId());
}
+ InetSocketAddress isa =
+ new InetSocketAddress(sn.getHostname(), sn.getPort());
HMasterInterface tryMaster = (HMasterInterface)HBaseRPC.getProxy(
- HMasterInterface.class, HMasterInterface.VERSION,
- masterLocation.getInetSocketAddress(), this.conf, this.rpcTimeout);
+ HMasterInterface.class, HMasterInterface.VERSION, isa, this.conf,
+ this.rpcTimeout);
if (tryMaster.isMasterRunning()) {
this.master = tryMaster;
@@ -390,10 +406,10 @@ public class HConnectionManager {
this.masterChecked = true;
}
if (this.master == null) {
- if (masterLocation == null) {
+ if (sn == null) {
throw new MasterNotRunningException();
}
- throw new MasterNotRunningException(masterLocation.toString());
+ throw new MasterNotRunningException(sn.toString());
}
return this.master;
}
@@ -576,12 +592,13 @@ public class HConnectionManager {
if (Bytes.equals(tableName, HConstants.ROOT_TABLE_NAME)) {
try {
- HServerAddress hsa =
+ ServerName servername =
this.rootRegionTracker.waitRootRegionLocation(this.rpcTimeout);
LOG.debug("Lookedup root region location, connection=" + this +
- "; hsa=" + hsa);
- if (hsa == null) return null;
- return new HRegionLocation(HRegionInfo.ROOT_REGIONINFO, hsa);
+ "; serverName=" + ((servername == null)? "": servername.toString()));
+ if (servername == null) return null;
+ return new HRegionLocation(HRegionInfo.ROOT_REGIONINFO,
+ new HServerAddress(servername.getHostname(), servername.getPort()));
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
return null;
@@ -630,11 +647,12 @@ public class HConnectionManager {
if (value == null) {
return true; // don't cache it
}
- final String serverAddress = Bytes.toString(value);
-
+ final String hostAndPort = Bytes.toString(value);
+ String hostname = Addressing.parseHostname(hostAndPort);
+ int port = Addressing.parsePort(hostAndPort);
// instantiate the location
HRegionLocation loc = new HRegionLocation(regionInfo,
- new HServerAddress(serverAddress));
+ new HServerAddress(hostname, port));
// cache this meta entry
cacheLocation(tableName, loc);
}
@@ -688,8 +706,7 @@ public class HConnectionManager {
metaLocation = locateRegion(parentTable, metaKey);
// If null still, go around again.
if (metaLocation == null) continue;
- HRegionInterface server =
- getHRegionConnection(metaLocation.getServerAddress());
+ HRegionInterface server = getHRegionConnection(metaLocation.getServerAddress());
Result regionInfoRow = null;
// This block guards against two threads trying to load the meta
@@ -724,7 +741,7 @@ public class HConnectionManager {
if (regionInfoRow == null) {
throw new TableNotFoundException(Bytes.toString(tableName));
}
- byte[] value = regionInfoRow.getValue(HConstants.CATALOG_FAMILY,
+ byte [] value = regionInfoRow.getValue(HConstants.CATALOG_FAMILY,
HConstants.REGIONINFO_QUALIFIER);
if (value == null || value.length == 0) {
throw new IOException("HRegionInfo was null or empty in " +
@@ -745,19 +762,20 @@ public class HConnectionManager {
value = regionInfoRow.getValue(HConstants.CATALOG_FAMILY,
HConstants.SERVER_QUALIFIER);
- String serverAddress = "";
- if(value != null) {
- serverAddress = Bytes.toString(value);
+ String hostAndPort = "";
+ if (value != null) {
+ hostAndPort = Bytes.toString(value);
}
- if (serverAddress.equals("")) {
+ if (hostAndPort.equals("")) {
throw new NoServerForRegionException("No server address listed " +
"in " + Bytes.toString(parentTable) + " for region " +
regionInfo.getRegionNameAsString());
}
- // instantiate the location
- location = new HRegionLocation(regionInfo,
- new HServerAddress(serverAddress));
+ // Instantiate the location
+ String hostname = Addressing.parseHostname(hostAndPort);
+ int port = Addressing.parsePort(hostAndPort);
+ location = new HRegionLocation(regionInfo, new HServerAddress(hostname, port));
cacheLocation(tableName, location);
return location;
} catch (TableNotFoundException e) {
@@ -943,14 +961,25 @@ public class HConnectionManager {
}
}
- public HRegionInterface getHRegionConnection(
- HServerAddress regionServer, boolean getMaster)
+ public HRegionInterface getHRegionConnection(HServerAddress hsa)
throws IOException {
- if (getMaster) {
- getMaster();
- }
+ return getHRegionConnection(hsa, false);
+ }
+
+ public HRegionInterface getHRegionConnection(HServerAddress hsa, boolean master)
+ throws IOException {
+ return getHRegionConnection(hsa.getInetSocketAddress(), master);
+ }
+
+ // TODO: Change getHRegionConnection to take InetSocketAddress and not
+ // HSA -- St.Ack
+
+ public HRegionInterface getHRegionConnection(final InetSocketAddress isa,
+ final boolean master)
+ throws IOException {
+ if (master) getMaster();
HRegionInterface server;
- String rsName = regionServer.toString();
+ String rsName = isa.toString();
// See if we already have a connection (common case)
server = this.servers.get(rsName);
if (server == null) {
@@ -968,9 +997,9 @@ public class HConnectionManager {
// definitely a cache miss. establish an RPC for this RS
server = (HRegionInterface) HBaseRPC.waitForProxy(
serverInterfaceClass, HRegionInterface.VERSION,
- regionServer.getInetSocketAddress(), this.conf,
+ isa, this.conf,
this.maxRPCAttempts, this.rpcTimeout, this.rpcTimeout);
- this.servers.put(rsName, server);
+ this.servers.put(isa.toString(), server);
} catch (RemoteException e) {
LOG.warn("RemoteException connecting to RS", e);
// Throw what the RemoteException was carrying.
@@ -982,12 +1011,6 @@ public class HConnectionManager {
return server;
}
- public HRegionInterface getHRegionConnection(
- HServerAddress regionServer)
- throws IOException {
- return getHRegionConnection(regionServer, false);
- }
-
/**
* Get the ZooKeeper instance for this TableServers instance.
*
@@ -1216,17 +1239,17 @@ public class HConnectionManager {
Row row = workingList.get(i);
if (row != null) {
HRegionLocation loc = locateRegion(tableName, row.getRow(), true);
- HServerAddress address = loc.getServerAddress();
+ HServerAddress a = loc.getServerAddress();
byte[] regionName = loc.getRegionInfo().getRegionName();
- MultiAction<R> actions = actionsByServer.get(address);
+ MultiAction<R> actions = actionsByServer.get(a);
if (actions == null) {
actions = new MultiAction<R>();
- actionsByServer.put(address, actions);
+ actionsByServer.put(a, actions);
}
Action<R> action = new Action<R>(regionName, row, i);
- lastServers[i] = address;
+ lastServers[i] = a;
actions.add(regionName, action);
}
}
@@ -1237,8 +1260,7 @@ public class HConnectionManager {
new HashMap<HServerAddress, Future<MultiResponse>>(
actionsByServer.size());
- for (Entry<HServerAddress, MultiAction<R>> e
- : actionsByServer.entrySet()) {
+ for (Entry<HServerAddress, MultiAction<R>> e: actionsByServer.entrySet()) {
futures.put(e.getKey(), pool.submit(createCallable(e.getKey(), e.getValue(), tableName)));
}
@@ -1420,7 +1442,7 @@ public class HConnectionManager {
final Map<HRegionInfo, HServerAddress> regions) {
for (Map.Entry<HRegionInfo, HServerAddress> e : regions.entrySet()) {
cacheLocation(tableName,
- new HRegionLocation(e.getKey(), e.getValue()));
+ new HRegionLocation(e.getKey(), e.getValue()));
}
}
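
On the Addressing helpers used above: locations read from .META. are plain host ':' port strings and are split before an InetSocketAddress or HServerAddress is built; getHRegionConnection() then keys its proxy cache on the socket address's toString(). A short sketch, assuming parseHostname/parsePort behave as their names and call sites suggest:

    import java.net.InetSocketAddress;

    import org.apache.hadoop.hbase.util.Addressing;

    public class HostAndPortSketch {
      public static void main(String[] args) {
        String hostAndPort = "rs1.example.com:60020";  // as stored in the catalog server cell
        String hostname = Addressing.parseHostname(hostAndPort);
        int port = Addressing.parsePort(hostAndPort);
        InetSocketAddress isa = new InetSocketAddress(hostname, port);
        System.out.println(isa);  // the string used as the connection-cache key
      }
    }
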
diff --git a/src/main/java/org/apache/hadoop/hbase/client/HTable.java b/src/main/java/org/apache/hadoop/hbase/client/HTable.java
index bb3a8fa..3d8115a 100644
--- a/src/main/java/org/apache/hadoop/hbase/client/HTable.java
+++ b/src/main/java/org/apache/hadoop/hbase/client/HTable.java
@@ -55,6 +55,7 @@ import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitor;
import org.apache.hadoop.hbase.client.coprocessor.Batch;
import org.apache.hadoop.hbase.ipc.CoprocessorProtocol;
import org.apache.hadoop.hbase.ipc.ExecRPCInvoker;
+import org.apache.hadoop.hbase.util.Addressing;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.util.Writables;
@@ -418,8 +419,8 @@ public class HTable implements HTableInterface {
byte [] value = rowResult.getValue(HConstants.CATALOG_FAMILY,
HConstants.SERVER_QUALIFIER);
if (value != null && value.length > 0) {
- String address = Bytes.toString(value);
- server = new HServerAddress(address);
+ String hostAndPort = Bytes.toString(value);
+ server = new HServerAddress(Addressing.createInetSocketAddressFromHostAndPortStr(hostAndPort));
}
if (!(info.isOffline() || info.isSplit())) {
diff --git a/src/main/java/org/apache/hadoop/hbase/client/RetriesExhaustedWithDetailsException.java b/src/main/java/org/apache/hadoop/hbase/client/RetriesExhaustedWithDetailsException.java
index 6c62024..f30bf00 100644
--- a/src/main/java/org/apache/hadoop/hbase/client/RetriesExhaustedWithDetailsException.java
+++ b/src/main/java/org/apache/hadoop/hbase/client/RetriesExhaustedWithDetailsException.java
@@ -50,7 +50,7 @@ public class RetriesExhaustedWithDetailsException extends RetriesExhaustedExcept
List addresses) {
super("Failed " + exceptions.size() + " action" +
pluralize(exceptions) + ": " +
- getDesc(exceptions,actions,addresses));
+ getDesc(exceptions, actions, addresses));
this.exceptions = exceptions;
this.actions = actions;
diff --git a/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterObserver.java b/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterObserver.java
index 9576c48..d30a5ad 100644
--- a/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterObserver.java
+++ b/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterObserver.java
@@ -22,8 +22,8 @@ package org.apache.hadoop.hbase.coprocessor;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HServerInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.UnknownRegionException;
import java.io.IOException;
@@ -111,13 +111,13 @@ public class BaseMasterObserver implements MasterObserver {
@Override
public void preMove(MasterCoprocessorEnvironment env, HRegionInfo region,
- HServerInfo srcServer, HServerInfo destServer)
+ ServerName srcServer, ServerName destServer)
throws UnknownRegionException {
}
@Override
public void postMove(MasterCoprocessorEnvironment env, HRegionInfo region,
- HServerInfo srcServer, HServerInfo destServer)
+ ServerName srcServer, ServerName destServer)
throws UnknownRegionException {
}
diff --git a/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java b/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
index db0870b..8e4a561 100644
--- a/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
+++ b/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
@@ -138,14 +138,14 @@ public interface MasterObserver extends Coprocessor {
* Called prior to moving a given region from one region server to another.
*/
void preMove(MasterCoprocessorEnvironment env, final HRegionInfo region,
- final HServerInfo srcServer, final HServerInfo destServer)
+ final ServerName srcServer, final ServerName destServer)
throws UnknownRegionException;
/**
* Called after the region move has been requested.
*/
void postMove(MasterCoprocessorEnvironment env, final HRegionInfo region,
- final HServerInfo srcServer, final HServerInfo destServer)
+ final ServerName srcServer, final ServerName destServer)
throws UnknownRegionException;
/**
diff --git a/src/main/java/org/apache/hadoop/hbase/executor/EventHandler.java b/src/main/java/org/apache/hadoop/hbase/executor/EventHandler.java
index de13e27..85ef70e 100644
--- a/src/main/java/org/apache/hadoop/hbase/executor/EventHandler.java
+++ b/src/main/java/org/apache/hadoop/hbase/executor/EventHandler.java
@@ -223,4 +223,4 @@ public abstract class EventHandler implements Runnable, Comparable<Runnable> {
public synchronized void setListener(EventHandlerListener listener) {
this.listener = listener;
}
-}
+}
\ No newline at end of file
diff --git a/src/main/java/org/apache/hadoop/hbase/executor/RegionTransitionData.java b/src/main/java/org/apache/hadoop/hbase/executor/RegionTransitionData.java
index a55f9d6..0f49dc1 100644
--- a/src/main/java/org/apache/hadoop/hbase/executor/RegionTransitionData.java
+++ b/src/main/java/org/apache/hadoop/hbase/executor/RegionTransitionData.java
@@ -23,6 +23,7 @@ import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
+import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.executor.EventHandler.EventType;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Writables;
@@ -42,7 +43,7 @@ public class RegionTransitionData implements Writable {
private byte [] regionName;
/** Server event originated from. Optional. */
- private String serverName;
+ private ServerName origin;
/** Time the event was created. Required but automatically set. */
private long stamp;
@@ -89,11 +90,11 @@ public class RegionTransitionData implements Writable {
*
* @param eventType type of event
* @param regionName name of region as per HRegionInfo#getRegionName()
- * @param serverName name of server setting data
+ * @param origin Originating {@link ServerName}
*/
public RegionTransitionData(EventType eventType, byte [] regionName,
- String serverName) {
- this(eventType, regionName, serverName, null);
+ final ServerName origin) {
+ this(eventType, regionName, origin, null);
}
/**
@@ -107,16 +108,16 @@ public class RegionTransitionData implements Writable {
*
* @param eventType type of event
* @param regionName name of region as per HRegionInfo#getRegionName()
- * @param serverName name of server setting data
+ * @param origin Originating {@link ServerName}
* @param payload Payload examples include the daughters involved in a
* {@link EventType#RS_ZK_REGION_SPLIT}. Can be null
*/
public RegionTransitionData(EventType eventType, byte [] regionName,
- String serverName, final byte [] payload) {
+ final ServerName serverName, final byte [] payload) {
this.eventType = eventType;
this.stamp = System.currentTimeMillis();
this.regionName = regionName;
- this.serverName = serverName;
+ this.origin = serverName;
this.payload = payload;
}
@@ -155,8 +156,8 @@ public class RegionTransitionData implements Writable {
*
* @return server name of originating regionserver, or null if from master
*/
- public String getServerName() {
- return serverName;
+ public ServerName getOrigin() {
+ return origin;
}
/**
@@ -185,10 +186,8 @@ public class RegionTransitionData implements Writable {
regionName = Bytes.readByteArray(in);
// remaining fields are optional so prefixed with boolean
// the name of the regionserver sending the data
- if(in.readBoolean()) {
- serverName = in.readUTF();
- } else {
- serverName = null;
+ if (in.readBoolean()) {
+ this.origin = new ServerName(in.readUTF());
}
if (in.readBoolean()) {
this.payload = Bytes.readByteArray(in);
@@ -201,9 +200,9 @@ public class RegionTransitionData implements Writable {
out.writeLong(System.currentTimeMillis());
Bytes.writeByteArray(out, regionName);
// remaining fields are optional so prefixed with boolean
- out.writeBoolean(serverName != null);
- if(serverName != null) {
- out.writeUTF(serverName);
+ out.writeBoolean(this.origin != null);
+ if(this.origin != null) {
+ out.writeUTF(this.origin.toString());
}
out.writeBoolean(this.payload != null);
if (this.payload != null) {
@@ -244,7 +243,7 @@ public class RegionTransitionData implements Writable {
@Override
public String toString() {
- return "region=" + Bytes.toString(regionName) + ", server=" + serverName +
+ return "region=" + Bytes.toString(regionName) + ", origin=" + this.origin +
", state=" + eventType;
}
}
\ No newline at end of file
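
The write()/readFields() changes above keep the existing optional-field convention: a boolean flag says whether the origin follows, and the origin itself is now the ServerName's toString() written as UTF. A self-contained sketch of just that convention; the stream plumbing here is illustrative:

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;

    import org.apache.hadoop.hbase.ServerName;

    public class OptionalOriginSketch {
      public static void main(String[] args) throws IOException {
        ServerName origin = new ServerName("rs1.example.com", 60020, 1289493121758L);

        // Write: presence flag first, then the ServerName as a UTF string.
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(bytes);
        out.writeBoolean(origin != null);
        if (origin != null) out.writeUTF(origin.toString());
        out.close();

        // Read: only construct a ServerName if the flag was set.
        DataInputStream in =
          new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()));
        ServerName readBack = in.readBoolean()? new ServerName(in.readUTF()): null;
        System.out.println(origin.equals(readBack));  // expect true
      }
    }
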
diff --git a/src/main/java/org/apache/hadoop/hbase/io/HbaseObjectWritable.java b/src/main/java/org/apache/hadoop/hbase/io/HbaseObjectWritable.java
index d8f8463..01df7f5 100644
--- a/src/main/java/org/apache/hadoop/hbase/io/HbaseObjectWritable.java
+++ b/src/main/java/org/apache/hadoop/hbase/io/HbaseObjectWritable.java
@@ -41,7 +41,6 @@ import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.hbase.ClusterStatus;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HMsg;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HServerAddress;
import org.apache.hadoop.hbase.HServerInfo;
@@ -148,8 +147,14 @@ public class HbaseObjectWritable implements Writable, WritableWithSize, Configur
// Hbase types
addToMap(HColumnDescriptor.class, code++);
addToMap(HConstants.Modify.class, code++);
- addToMap(HMsg.class, code++);
- addToMap(HMsg[].class, code++);
+
+ // We used to have a class named HMsg, but it has been removed. Rather than
+ // just drop its slots, we map two stand-in classes (Integer, picked
+ // arbitrarily from java.lang) so that the codes assigned to the classes
+ // that follow stay in the same relative place.
+ addToMap(Integer.class, code++);
+ addToMap(Integer[].class, code++);
+
addToMap(HRegion.class, code++);
addToMap(HRegion[].class, code++);
addToMap(HRegionInfo.class, code++);
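
The Integer placeholders above matter because the class-to-code table is built with a running counter, so removing an entry would renumber every class added after it and break wire compatibility with older peers. A stand-alone illustration of that property; all names here are illustrative:

    import java.util.LinkedHashMap;
    import java.util.Map;

    public class CodeStabilitySketch {
      static Map<String, Integer> number(String... classes) {
        Map<String, Integer> codes = new LinkedHashMap<String, Integer>();
        int code = 0;
        for (String c : classes) codes.put(c, code++);
        return codes;
      }

      public static void main(String[] args) {
        // A toy table where HMsg and HMsg[] sit in front of HRegion.
        System.out.println(number("HColumnDescriptor", "HMsg", "HMsg[]", "HRegion"));
        // Dropping them outright shifts the code assigned to HRegion...
        System.out.println(number("HColumnDescriptor", "HRegion"));
        // ...so placeholder entries are kept in the same slots instead.
        System.out.println(number("HColumnDescriptor", "Integer", "Integer[]", "HRegion"));
      }
    }
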
diff --git a/src/main/java/org/apache/hadoop/hbase/ipc/HMasterRegionInterface.java b/src/main/java/org/apache/hadoop/hbase/ipc/HMasterRegionInterface.java
index 25139b3..902004a 100644
--- a/src/main/java/org/apache/hadoop/hbase/ipc/HMasterRegionInterface.java
+++ b/src/main/java/org/apache/hadoop/hbase/ipc/HMasterRegionInterface.java
@@ -19,22 +19,16 @@
*/
package org.apache.hadoop.hbase.ipc;
-import org.apache.hadoop.hbase.HMsg;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HServerInfo;
+import java.io.IOException;
+
+import org.apache.hadoop.hbase.HServerLoad;
+import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.io.MapWritable;
import org.apache.hadoop.ipc.VersionedProtocol;
-import java.io.IOException;
-
/**
- * HRegionServers interact with the HMasterRegionInterface to report on local
- * goings-on and to obtain data-handling instructions from the HMaster.
- * Changes here need to be reflected in HbaseObjectWritable HbaseRPC#Invoker.
- *
- *
- * <p>NOTE: if you change the interface, you must change the RPC version
- * number in HBaseRPCProtocolVersion
- *
+ * The Master publishes this Interface for RegionServers to register themselves
+ * on.
*/
public interface HMasterRegionInterface extends VersionedProtocol {
/**
@@ -44,32 +38,27 @@ public interface HMasterRegionInterface extends VersionedProtocol {
// maintained a single global version number on all HBase Interfaces. This
// meant all HBase RPC was broke though only one of the three RPC Interfaces
// had changed. This has since been undone.
- public static final long VERSION = 28L;
+ public static final long VERSION = 29L;
/**
- * Called when a region server first starts
- * @param info server info
+ * Called when a region server first starts.
+ * @param port Port number this regionserver is up on.
+ * @param serverStartcode This server's startcode.
* @param serverCurrentTime The current time of the region server in ms
* @throws IOException e
* @return Configuration for the regionserver to use: e.g. filesystem,
- * hbase rootdir, etc.
+ * hbase rootdir, the hostname to use creating the RegionServer ServerName,
+ * etc.
*/
- public MapWritable regionServerStartup(HServerInfo info,
- long serverCurrentTime) throws IOException;
+ public MapWritable regionServerStartup(final int port,
+ final long serverStartcode, final long serverCurrentTime)
+ throws IOException;
/**
- * Called to renew lease, tell master what the region server is doing and to
- * receive new instructions from the master
- *
- * @param info server's address and start code
- * @param msgs things the region server wants to tell the master
- * @param mostLoadedRegions Array of HRegionInfos that should contain the
- * reporting server's most loaded regions. These are candidates for being
- * rebalanced.
- * @return instructions from the master to the region server
- * @throws IOException e
+ * @param sn Server name.
+ * @param hsl Server load.
+ * @throws IOException
*/
- public HMsg[] regionServerReport(HServerInfo info, HMsg msgs[],
- HRegionInfo mostLoadedRegions[])
+ public void regionServerReport(ServerName sn, HServerLoad hsl)
throws IOException;
}
\ No newline at end of file
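
A hedged sketch of the registration flow implied by the slimmed-down interface above: the regionserver first announces only its port and startcode, then reports load under the ServerName it ends up using. The MasterStub type and the null load below are illustrative stand-ins; the real calls go through HMasterRegionInterface and pass an HServerLoad:

    import org.apache.hadoop.hbase.ServerName;

    public class RegistrationFlowSketch {
      // Local stand-in mirroring the two methods of the interface above.
      interface MasterStub {
        void regionServerStartup(int port, long serverStartcode, long serverCurrentTime);
        void regionServerReport(ServerName sn, Object serverLoad);
      }

      static void runOnce(MasterStub master, String hostname, int port) {
        long startcode = System.currentTimeMillis();
        // 1. Announce ourselves; the real call returns a MapWritable of configuration.
        master.regionServerStartup(port, startcode, System.currentTimeMillis());
        // 2. Build the ServerName we will be known by and report load under it.
        ServerName sn = new ServerName(hostname, port, startcode);
        master.regionServerReport(sn, null /* HServerLoad elided in this sketch */);
      }

      public static void main(String[] args) {
        runOnce(new MasterStub() {
          public void regionServerStartup(int port, long startcode, long now) {
            System.out.println("startup: port=" + port + ", startcode=" + startcode);
          }
          public void regionServerReport(ServerName sn, Object load) {
            System.out.println("report: " + sn);
          }
        }, "rs1.example.com", 60020);
      }
    }
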
diff --git a/src/main/java/org/apache/hadoop/hbase/ipc/HRegionInterface.java b/src/main/java/org/apache/hadoop/hbase/ipc/HRegionInterface.java
index 663cab5..864f6eb 100644
--- a/src/main/java/org/apache/hadoop/hbase/ipc/HRegionInterface.java
+++ b/src/main/java/org/apache/hadoop/hbase/ipc/HRegionInterface.java
@@ -292,9 +292,11 @@ public interface HRegionInterface extends VersionedProtocol, Stoppable, Abortabl
/**
* Method used when a master is taking the place of another failed one.
- * @return The HSI
+ * @return This server's {@link HServerInfo}; it has the RegionServer's POV on the
+ * hostname which may not agree w/ how the Master sees this server.
* @throws IOException e
*/
+ // TODO: Deprecate. Use getServerName instead. Return byte []
public HServerInfo getHServerInfo() throws IOException;
/**
diff --git a/src/main/java/org/apache/hadoop/hbase/master/ActiveMasterManager.java b/src/main/java/org/apache/hadoop/hbase/master/ActiveMasterManager.java
index 66a3345..5125a71 100644
--- a/src/main/java/org/apache/hadoop/hbase/master/ActiveMasterManager.java
+++ b/src/main/java/org/apache/hadoop/hbase/master/ActiveMasterManager.java
@@ -23,8 +23,9 @@ import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.HServerAddress;
import org.apache.hadoop.hbase.Server;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperListener;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
@@ -48,13 +49,17 @@ class ActiveMasterManager extends ZooKeeperListener {
final AtomicBoolean clusterHasActiveMaster = new AtomicBoolean(false);
- private final HServerAddress address;
+ private final ServerName sn;
private final Server master;
- ActiveMasterManager(ZooKeeperWatcher watcher, HServerAddress address,
- Server master) {
+ /**
+ * @param watcher
+ * @param sn ServerName
+ * @param master An instance of a Master.
+ */
+ ActiveMasterManager(ZooKeeperWatcher watcher, ServerName sn, Server master) {
super(watcher);
- this.address = address;
+ this.sn = sn;
this.master = master;
}
@@ -122,11 +127,11 @@ class ActiveMasterManager extends ZooKeeperListener {
boolean cleanSetOfActiveMaster = true;
// Try to become the active master, watch if there is another master
try {
- if (ZKUtil.setAddressAndWatch(this.watcher,
- this.watcher.masterAddressZNode, this.address)) {
+ if (ZKUtil.createEphemeralNodeAndWatch(this.watcher,
+ this.watcher.masterAddressZNode, Bytes.toBytes(this.sn.toString()))) {
// We are the master, return
this.clusterHasActiveMaster.set(true);
- LOG.info("Master=" + this.address);
+ LOG.info("Master=" + this.sn);
return cleanSetOfActiveMaster;
}
cleanSetOfActiveMaster = false;
@@ -134,9 +139,10 @@ class ActiveMasterManager extends ZooKeeperListener {
// There is another active master running elsewhere or this is a restart
// and the master ephemeral node has not expired yet.
this.clusterHasActiveMaster.set(true);
- HServerAddress currentMaster =
- ZKUtil.getDataAsAddress(this.watcher, this.watcher.masterAddressZNode);
- if (currentMaster != null && currentMaster.equals(this.address)) {
+ byte [] bytes =
+ ZKUtil.getDataAndWatch(this.watcher, this.watcher.masterAddressZNode);
+ ServerName currentMaster = new ServerName(Bytes.toString(bytes));
+ if (currentMaster != null && currentMaster.equals(this.sn)) {
LOG.info("Current master has this master's address, " + currentMaster +
"; master was restarted? Waiting on znode to expire...");
// Hurry along the expiration of the znode.
@@ -177,11 +183,11 @@ class ActiveMasterManager extends ZooKeeperListener {
public void stop() {
try {
// If our address is in ZK, delete it on our way out
- HServerAddress zkAddress =
- ZKUtil.getDataAsAddress(watcher, watcher.masterAddressZNode);
+ byte [] bytes =
+ ZKUtil.getDataAndWatch(watcher, watcher.masterAddressZNode);
// TODO: redo this to make it atomic (only added for tests)
- if(zkAddress != null &&
- zkAddress.equals(address)) {
+ ServerName master = new ServerName(Bytes.toString(bytes));
+ if(master != null && master.equals(this.sn)) {
ZKUtil.deleteNode(watcher, watcher.masterAddressZNode);
}
} catch (KeeperException e) {
diff --git a/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java b/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
index e9b2af2..a36c2d0 100644
--- a/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
+++ b/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
@@ -21,9 +21,7 @@ package org.apache.hadoop.hbase.master;
import java.io.DataInput;
import java.io.DataOutput;
-import java.io.EOFException;
import java.io.IOException;
-import java.net.ConnectException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
@@ -43,11 +41,10 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Chore;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HServerAddress;
-import org.apache.hadoop.hbase.HServerInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.NotServingRegionException;
import org.apache.hadoop.hbase.Server;
+import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.Stoppable;
import org.apache.hadoop.hbase.catalog.CatalogTracker;
import org.apache.hadoop.hbase.catalog.MetaReader;
@@ -124,8 +121,8 @@ public class AssignmentManager extends ZooKeeperListener {
* with the other under a lock on {@link #regions}
* @see #regions
*/
- private final NavigableMap<HServerInfo, List<HRegionInfo>> servers =
- new TreeMap<HServerInfo, List<HRegionInfo>>();
+ private final NavigableMap<ServerName, List<HRegionInfo>> servers =
+ new TreeMap<ServerName, List<HRegionInfo>>();
/**
* Region to server assignment map.
@@ -134,8 +131,8 @@ public class AssignmentManager extends ZooKeeperListener {
* with the other under a lock on {@link #regions}
* @see #servers
*/
- private final SortedMap<HRegionInfo, HServerInfo> regions =
- new TreeMap<HRegionInfo, HServerInfo>();
+ private final SortedMap<HRegionInfo, ServerName> regions =
+ new TreeMap<HRegionInfo, ServerName>();
private final ExecutorService executorService;
@@ -169,6 +166,26 @@ public class AssignmentManager extends ZooKeeperListener {
}
/**
+ * Compute the average load across all region servers.
+ * Currently, this uses a very naive computation - just uses the number of
+ * regions being served, ignoring stats about number of requests.
+ * @return the average load
+ */
+ double getAverageLoad() {
+ int totalLoad = 0;
+ int numServers = 0;
+ // Sync on this.regions because access to this.servers always synchronizes
+ // in this order.
+ synchronized (this.regions) {
+ for (Map.Entry<ServerName, List<HRegionInfo>> e: servers.entrySet()) {
+ numServers++;
+ totalLoad += e.getValue().size();
+ }
+ }
+ return (double)totalLoad / (double)numServers;
+ }
+
+ /**
* @return Instance of ZKTable.
*/
public ZKTable getZKTable() {
@@ -191,38 +208,51 @@ public class AssignmentManager extends ZooKeeperListener {
}
/**
- * Handle failover. Restore state from META and ZK. Handle any regions in
- * transition. Presumes .META. and -ROOT- deployed.
- * @throws KeeperException
+ * Called on startup.
+ * Figures whether this is a fresh cluster start or we are joining an extant running cluster.
* @throws IOException
+ * @throws KeeperException
+ * @throws InterruptedException
*/
- void processFailover() throws KeeperException, IOException {
+ void joinCluster() throws IOException, KeeperException, InterruptedException {
// Concurrency note: In the below the accesses on regionsInTransition are
// outside of a synchronization block where usually all accesses to RIT are
// synchronized. The presumption is that in this case it is safe since this
// method is being played by a single thread on startup.
- // TODO: Check list of user regions and their assignments against regionservers.
// TODO: Regions that have a null location and are not in regionsInTransitions
// need to be handled.
- // Scan META to build list of existing regions, servers, and assignment
- // Returns servers who have not checked in (assumed dead) and their regions
- Map<HServerInfo, List<Pair<HRegionInfo, Result>>> deadServers =
+ Map<ServerName, List<Pair<HRegionInfo, Result>>> deadServers =
rebuildUserRegions();
- // Process list of dead servers
- processDeadServers(deadServers);
// Check existing regions in transition
List<String> nodes = ZKUtil.listChildrenAndWatchForNewChildren(watcher,
- watcher.assignmentZNode);
- if (nodes.isEmpty()) {
- LOG.info("No regions in transition in ZK to process on failover");
- return;
+ watcher.assignmentZNode);
+ // Run through all regions. If they are not assigned and not in RIT, then
+ // it's a clean cluster startup, else it's a failover.
+ boolean userRegionsOutOnCluster = false;
+ for (Map.Entry<HRegionInfo, ServerName> e: this.regions.entrySet()) {
+ if (e.getValue() != null) {
+ userRegionsOutOnCluster = true;
+ break;
+ }
+ if (nodes.contains(e.getKey().getEncodedName())) {
+ userRegionsOutOnCluster = true;
+ break;
+ }
}
- LOG.info("Failed-over master needs to process " + nodes.size() +
- " regions in transition");
- for (String encodedRegionName: nodes) {
- processRegionInTransition(encodedRegionName, null);
+ if (userRegionsOutOnCluster) {
+ LOG.info("Found regions out on cluster or in RIT; failover");
+ processDeadServers(deadServers);
+ if (!nodes.isEmpty()) {
+ for (String encodedRegionName: nodes) {
+ processRegionInTransition(encodedRegionName, null);
+ }
+ }
+ } else {
+ // Fresh cluster startup.
+ cleanoutUnassigned();
+ assignAllUserRegions();
}
}
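
The joinCluster() logic above reduces to one predicate: if any user region already has a location, or shows up among the unassigned znodes, startup is treated as a failover; otherwise the master does a clean bulk assign. A reduced sketch of that predicate with plain collections; types are simplified to Strings for illustration:

    import java.util.Collections;
    import java.util.List;
    import java.util.Map;

    public class JoinClusterSketch {
      // regionToServer: encoded region name -> server, or null if unassigned.
      // ritNodes: encoded region names that currently have an assignment (RIT) znode.
      static boolean isFailover(Map<String, String> regionToServer, List<String> ritNodes) {
        for (Map.Entry<String, String> e : regionToServer.entrySet()) {
          if (e.getValue() != null) return true;           // region already out on a server
          if (ritNodes.contains(e.getKey())) return true;  // or mid-transition in ZK
        }
        return false;                                      // nothing deployed: fresh start
      }

      public static void main(String[] args) {
        Map<String, String> regions = Collections.singletonMap("abc123", (String) null);
        System.out.println(isFailover(regions, Collections.<String>emptyList()));  // false
      }
    }
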
@@ -251,10 +281,10 @@ public class AssignmentManager extends ZooKeeperListener {
}
/**
- * Process failover of encodedName. Look in
+ * Process failover of the specified region; checks whether it is listed in RIT.
* @param encodedRegionName Region to process failover for.
- * @param encodedRegionName RegionInfo. If null we'll go get it from meta table.
- * @return
+ * @param regionInfo If null we'll go get it from meta table.
+ * @return True if we processed regionInfo as a RIT.
* @throws KeeperException
* @throws IOException
*/
@@ -265,7 +295,7 @@ public class AssignmentManager extends ZooKeeperListener {
if (data == null) return false;
HRegionInfo hri = regionInfo;
if (hri == null) {
- Pair<HRegionInfo, HServerAddress> p =
+ Pair<HRegionInfo, ServerName> p =
MetaReader.getRegion(catalogTracker, data.getRegionName());
if (p == null) return false;
hri = p.getFirst();
@@ -314,17 +344,18 @@ public class AssignmentManager extends ZooKeeperListener {
// Region is opened, insert into RIT and handle it
regionsInTransition.put(encodedRegionName, new RegionState(
regionInfo, RegionState.State.OPENING, data.getStamp()));
- HServerInfo hsi = serverManager.getServerInfo(data.getServerName());
+ ServerName sn =
+ data.getOrigin() == null? null: data.getOrigin();
// sn could be null if this server is no longer online. If
// that is the case, just let this RIT timeout; it'll be assigned
// to new server then.
- if (hsi == null) {
+ if (sn == null) {
LOG.warn("Region in transition " + regionInfo.getEncodedName() +
- " references a server no longer up " + data.getServerName() +
- "; letting RIT timeout so will be assigned elsewhere");
+ " references a null server; letting RIT timeout so will be " +
+ "assigned elsewhere");
break;
}
- new OpenedRegionHandler(master, this, regionInfo, hsi).process();
+ new OpenedRegionHandler(master, this, regionInfo, sn).process();
break;
}
}
@@ -341,18 +372,19 @@ public class AssignmentManager extends ZooKeeperListener {
*/
private void handleRegion(final RegionTransitionData data) {
synchronized(regionsInTransition) {
- if (data == null || data.getServerName() == null) {
+ if (data == null || data.getOrigin() == null) {
LOG.warn("Unexpected NULL input " + data);
return;
}
+ ServerName sn = data.getOrigin();
// Check if this is a special HBCK transition
- if (data.getServerName().equals(HConstants.HBCK_CODE_NAME)) {
+ if (sn.equals(HConstants.HBCK_CODE_SERVERNAME)) {
handleHBCK(data);
return;
}
// Verify this is a known server
- if (!serverManager.isServerOnline(data.getServerName()) &&
- !this.master.getServerName().equals(data.getServerName())) {
+ if (!serverManager.isServerOnline(sn) &&
+ !this.master.getServerName().equals(sn)) {
LOG.warn("Attempted to handle region transition for server but " +
"server is not online: " + data.getRegionName());
return;
@@ -360,7 +392,7 @@ public class AssignmentManager extends ZooKeeperListener {
String encodedName = HRegionInfo.encodeRegionName(data.getRegionName());
String prettyPrintedRegionName = HRegionInfo.prettyPrint(encodedName);
LOG.debug("Handling transition=" + data.getEventType() +
- ", server=" + data.getServerName() + ", region=" + prettyPrintedRegionName);
+ ", server=" + sn + ", region=" + prettyPrintedRegionName);
RegionState regionState = regionsInTransition.get(encodedName);
switch (data.getEventType()) {
case M_ZK_REGION_OFFLINE:
@@ -369,7 +401,7 @@ public class AssignmentManager extends ZooKeeperListener {
case RS_ZK_REGION_SPLITTING:
if (!isInStateForSplitting(regionState)) break;
- addSplittingToRIT(data.getServerName(), encodedName);
+ addSplittingToRIT(sn.toString(), encodedName);
break;
case RS_ZK_REGION_SPLIT:
@@ -378,9 +410,9 @@ public class AssignmentManager extends ZooKeeperListener {
// If null, add SPLITTING state before going to SPLIT
if (regionState == null) {
LOG.info("Received SPLIT for region " + prettyPrintedRegionName +
- " from server " + data.getServerName() +
+ " from server " + sn +
" but region was not first in SPLITTING state; continuing");
- addSplittingToRIT(data.getServerName(), encodedName);
+ addSplittingToRIT(sn.toString(), encodedName);
}
// Check it has daughters.
byte [] payload = data.getPayload();
@@ -394,14 +426,13 @@ public class AssignmentManager extends ZooKeeperListener {
}
assert daughters.size() == 2;
// Assert that we can get a serverinfo for this server.
- HServerInfo hsi = getAndCheckHServerInfo(data.getServerName());
- if (hsi == null) {
- LOG.error("Dropped split! No HServerInfo for " + data.getServerName());
+ if (!this.serverManager.isServerOnline(sn)) {
+ LOG.error("Dropped split! ServerName=" + sn + " unknown.");
break;
}
// Run handler to do the rest of the SPLIT handling.
this.executorService.submit(new SplitRegionHandler(master, this,
- regionState.getRegion(), hsi, daughters));
+ regionState.getRegion(), sn, daughters));
break;
case RS_ZK_REGION_CLOSING:
@@ -410,7 +441,7 @@ public class AssignmentManager extends ZooKeeperListener {
if (regionState == null ||
(!regionState.isPendingClose() && !regionState.isClosing())) {
LOG.warn("Received CLOSING for region " + prettyPrintedRegionName +
- " from server " + data.getServerName() + " but region was in " +
+ " from server " + data.getOrigin() + " but region was in " +
" the state " + regionState + " and not " +
"in expected PENDING_CLOSE or CLOSING states");
return;
@@ -424,7 +455,7 @@ public class AssignmentManager extends ZooKeeperListener {
if (regionState == null ||
(!regionState.isPendingClose() && !regionState.isClosing())) {
LOG.warn("Received CLOSED for region " + prettyPrintedRegionName +
- " from server " + data.getServerName() + " but region was in " +
+ " from server " + data.getOrigin() + " but region was in " +
" the state " + regionState + " and not " +
"in expected PENDING_CLOSE or CLOSING states");
return;
@@ -444,7 +475,7 @@ public class AssignmentManager extends ZooKeeperListener {
(!regionState.isPendingOpen() && !regionState.isOpening())) {
LOG.warn("Received OPENING for region " +
prettyPrintedRegionName +
- " from server " + data.getServerName() + " but region was in " +
+ " from server " + data.getOrigin() + " but region was in " +
" the state " + regionState + " and not " +
"in expected PENDING_OPEN or OPENING states");
return;
@@ -459,7 +490,7 @@ public class AssignmentManager extends ZooKeeperListener {
(!regionState.isPendingOpen() && !regionState.isOpening())) {
LOG.warn("Received OPENED for region " +
prettyPrintedRegionName +
- " from server " + data.getServerName() + " but region was in " +
+ " from server " + data.getOrigin() + " but region was in " +
" the state " + regionState + " and not " +
"in expected PENDING_OPEN or OPENING states");
return;
@@ -468,7 +499,7 @@ public class AssignmentManager extends ZooKeeperListener {
regionState.update(RegionState.State.OPEN, data.getStamp());
this.executorService.submit(
new OpenedRegionHandler(master, this, regionState.getRegion(),
- this.serverManager.getServerInfo(data.getServerName())));
+ data.getOrigin()));
break;
}
}
@@ -506,12 +537,6 @@ public class AssignmentManager extends ZooKeeperListener {
return true;
}
- private HServerInfo getAndCheckHServerInfo(final String serverName) {
- HServerInfo hsi = this.serverManager.getServerInfo(serverName);
- if (hsi == null) LOG.debug("No serverinfo for " + serverName);
- return hsi;
- }
-
/**
* @param serverName
* @param encodedName
@@ -554,9 +579,9 @@ public class AssignmentManager extends ZooKeeperListener {
*/
private HRegionInfo findHRegionInfo(final String serverName,
final String encodedName) {
- HServerInfo hsi = getAndCheckHServerInfo(serverName);
- if (hsi == null) return null;
- List<HRegionInfo> hris = this.servers.get(hsi);
+ ServerName sn = new ServerName(serverName);
+ if (!this.serverManager.isServerOnline(sn)) return null;
+ List<HRegionInfo> hris = this.servers.get(sn);
HRegionInfo foundHri = null;
for (HRegionInfo hri: hris) {
if (hri.getEncodedName().equals(encodedName)) {
@@ -576,7 +601,7 @@ public class AssignmentManager extends ZooKeeperListener {
private void handleHBCK(RegionTransitionData data) {
String encodedName = HRegionInfo.encodeRegionName(data.getRegionName());
LOG.info("Handling HBCK triggered transition=" + data.getEventType() +
- ", server=" + data.getServerName() + ", region=" +
+ ", server=" + data.getOrigin() + ", region=" +
HRegionInfo.prettyPrint(encodedName));
RegionState regionState = regionsInTransition.get(encodedName);
switch (data.getEventType()) {
@@ -723,9 +748,9 @@ public class AssignmentManager extends ZooKeeperListener {
*
* Used when a region has been successfully opened on a region server.
* @param regionInfo
- * @param serverInfo
+ * @param sn
*/
- public void regionOnline(HRegionInfo regionInfo, HServerInfo serverInfo) {
+ public void regionOnline(HRegionInfo regionInfo, ServerName sn) {
synchronized (this.regionsInTransition) {
RegionState rs =
this.regionsInTransition.remove(regionInfo.getEncodedName());
@@ -735,22 +760,22 @@ public class AssignmentManager extends ZooKeeperListener {
}
synchronized (this.regions) {
// Add check
- HServerInfo hsi = this.regions.get(regionInfo);
- if (hsi != null) LOG.warn("Overwriting " + regionInfo.getEncodedName() +
- " on " + hsi);
- this.regions.put(regionInfo, serverInfo);
- addToServers(serverInfo, regionInfo);
+ ServerName oldSn = this.regions.get(regionInfo);
+ if (oldSn != null) LOG.warn("Overwriting " + regionInfo.getEncodedName() +
+ " on " + oldSn + " with " + sn);
+ this.regions.put(regionInfo, sn);
+ addToServers(sn, regionInfo);
this.regions.notifyAll();
}
// Remove plan if one.
clearRegionPlan(regionInfo);
// Update timers for all regions in transition going against this server.
- updateTimers(serverInfo);
+ updateTimers(sn);
}
/**
* Touch timers for all regions in transition that have the passed
- * hsi in common.
+ * sn in common.
* Call this method whenever a server checks in. Doing so helps the case where
* a new regionserver has joined the cluster and its been given 1k regions to
* open. If this method is tickled every time the region reports in a
@@ -759,9 +784,9 @@ public class AssignmentManager extends ZooKeeperListener {
* as part of bulk assign -- there we have a different mechanism for extending
* the regions in transition timer (we turn it off temporarily -- because
* there is no regionplan involved when bulk assigning.
- * @param hsi
+ * @param sn
*/
- private void updateTimers(final HServerInfo hsi) {
+ private void updateTimers(final ServerName sn) {
// This loop could be expensive.
// First make a copy of current regionPlan rather than hold sync while
// looping because holding sync can cause deadlock. Its ok in this loop
@@ -771,7 +796,7 @@ public class AssignmentManager extends ZooKeeperListener {
copy.putAll(this.regionPlans);
}
for (Map.Entry<String, RegionPlan> e: copy.entrySet()) {
- if (!e.getValue().getDestination().equals(hsi)) continue;
+ if (!e.getValue().getDestination().equals(sn)) continue;
RegionState rs = null;
synchronized (this.regionsInTransition) {
rs = this.regionsInTransition.get(e.getKey());
@@ -810,11 +835,11 @@ public class AssignmentManager extends ZooKeeperListener {
*/
public void setOffline(HRegionInfo regionInfo) {
synchronized (this.regions) {
- HServerInfo serverInfo = this.regions.remove(regionInfo);
- if (serverInfo == null) return;
- List<HRegionInfo> serverRegions = this.servers.get(serverInfo);
+ ServerName sn = this.regions.remove(regionInfo);
+ if (sn == null) return;
+ List<HRegionInfo> serverRegions = this.servers.get(sn);
if (!serverRegions.remove(regionInfo)) {
- LOG.warn("No " + regionInfo + " on " + serverInfo);
+ LOG.warn("No " + regionInfo + " on " + sn);
}
}
}
@@ -889,10 +914,10 @@ public class AssignmentManager extends ZooKeeperListener {
* @param destination
* @param regions Regions to assign.
*/
- void assign(final HServerInfo destination,
+ void assign(final ServerName destination,
final List<HRegionInfo> regions) {
LOG.debug("Bulk assigning " + regions.size() + " region(s) to " +
- destination.getServerName());
+ destination.toString());
List<RegionState> states = new ArrayList<RegionState>(regions.size());
synchronized (this.regionsInTransition) {
@@ -915,7 +940,7 @@ public class AssignmentManager extends ZooKeeperListener {
for (int oldCounter = 0; true;) {
int count = counter.get();
if (oldCounter != count) {
- LOG.info(destination.getServerName() + " unassigned znodes=" + count +
+ LOG.info(destination.toString() + " unassigned znodes=" + count +
" of total=" + total);
oldCounter = count;
}
@@ -949,7 +974,7 @@ public class AssignmentManager extends ZooKeeperListener {
"; bulk assign FAILED", t);
return;
}
- LOG.debug("Bulk assigning done for " + destination.getServerName());
+ LOG.debug("Bulk assigning done for " + destination.toString());
}
/**
@@ -958,11 +983,11 @@ public class AssignmentManager extends ZooKeeperListener {
static class CreateUnassignedAsyncCallback implements AsyncCallback.StringCallback {
private final Log LOG = LogFactory.getLog(CreateUnassignedAsyncCallback.class);
private final ZooKeeperWatcher zkw;
- private final HServerInfo destination;
+ private final ServerName destination;
private final AtomicInteger counter;
CreateUnassignedAsyncCallback(final ZooKeeperWatcher zkw,
- final HServerInfo destination, final AtomicInteger counter) {
+ final ServerName destination, final AtomicInteger counter) {
this.zkw = zkw;
this.destination = destination;
this.counter = counter;
@@ -978,7 +1003,7 @@ public class AssignmentManager extends ZooKeeperListener {
", rc=" + rc, null);
return;
}
- LOG.debug("rs=" + (RegionState)ctx + ", server=" + this.destination.getServerName());
+ LOG.debug("rs=" + (RegionState)ctx + ", server=" + this.destination.toString());
// Async exists to set a watcher so we'll get triggered when
// unassigned node changes.
this.zkw.getZooKeeper().exists(path, this.zkw,
@@ -1065,7 +1090,7 @@ public class AssignmentManager extends ZooKeeperListener {
if (plan == null) return; // Should get reassigned later when RIT times out.
try {
LOG.debug("Assigning region " + state.getRegion().getRegionNameAsString() +
- " to " + plan.getDestination().getServerName());
+ " to " + plan.getDestination().toString());
// Transition RegionState to PENDING_OPEN
state.update(RegionState.State.PENDING_OPEN);
// Send OPEN RPC. This can fail if the server on other end is is not up.
@@ -1106,7 +1131,7 @@ public class AssignmentManager extends ZooKeeperListener {
state.update(RegionState.State.OFFLINE);
try {
if(!ZKAssign.createOrForceNodeOffline(master.getZooKeeper(),
- state.getRegion(), master.getServerName())) {
+ state.getRegion(), this.master.getServerName())) {
LOG.warn("Attempted to create/force node into OFFLINE state before " +
"completing assignment but failed to do so for " + state);
return false;
@@ -1135,7 +1160,7 @@ public class AssignmentManager extends ZooKeeperListener {
state.update(RegionState.State.OFFLINE);
try {
ZKAssign.asyncCreateNodeOffline(master.getZooKeeper(), state.getRegion(),
- master.getServerName(), cb, ctx);
+ this.master.getServerName(), cb, ctx);
} catch (KeeperException e) {
master.abort("Unexpected ZK exception creating/setting node OFFLINE", e);
return false;
@@ -1163,10 +1188,10 @@ public class AssignmentManager extends ZooKeeperListener {
* if no servers to assign, it returns null).
*/
RegionPlan getRegionPlan(final RegionState state,
- final HServerInfo serverToExclude, final boolean forceNewPlan) {
+ final ServerName serverToExclude, final boolean forceNewPlan) {
// Pickup existing plan or make a new one
String encodedName = state.getRegion().getEncodedName();
- List<HServerInfo> servers = this.serverManager.getOnlineServersList();
+ List<ServerName> servers = this.serverManager.getOnlineServersList();
// The remove below hinges on the fact that the call to
// serverManager.getOnlineServersList() returns a copy
if (serverToExclude != null) servers.remove(serverToExclude);
@@ -1254,7 +1279,7 @@ public class AssignmentManager extends ZooKeeperListener {
}
}
// Send CLOSE RPC
- HServerInfo server = null;
+ ServerName server = null;
synchronized (this.regions) {
server = regions.get(region);
}
@@ -1339,13 +1364,14 @@ public class AssignmentManager extends ZooKeeperListener {
* @throws InterruptedException
* @throws IOException
*/
- public void assignUserRegions(HRegionInfo[] regions, List<HServerInfo> servers) throws IOException, InterruptedException {
+ public void assignUserRegions(List<HRegionInfo> regions, List<ServerName> servers)
+ throws IOException, InterruptedException {
if (regions == null)
return;
- Map<HServerInfo, List<HRegionInfo>> bulkPlan = null;
+ Map<ServerName, List<HRegionInfo>> bulkPlan = null;
// Generate a round-robin bulk assignment plan
bulkPlan = LoadBalancer.roundRobinAssignment(regions, servers);
- LOG.info("Bulk assigning " + regions.length + " region(s) round-robin across " +
+ LOG.info("Bulk assigning " + regions.size() + " region(s) round-robin across " +
servers.size() + " server(s)");
// Use fixed count thread pool assigning.
BulkAssigner ba = new BulkStartupAssigner(this.master, bulkPlan, this);
@@ -1364,10 +1390,10 @@ public class AssignmentManager extends ZooKeeperListener {
*/
public void assignAllUserRegions() throws IOException, InterruptedException {
// Get all available servers
- List<HServerInfo> servers = serverManager.getOnlineServersList();
+ List<ServerName> servers = serverManager.getOnlineServersList();
// Scan META for all user regions, skipping any disabled tables
- Map<HRegionInfo, HServerAddress> allRegions =
+ Map<HRegionInfo, ServerName> allRegions =
MetaReader.fullScan(catalogTracker, this.zkTable.getDisabledTables(), true);
if (allRegions == null || allRegions.isEmpty()) return;
@@ -1375,13 +1401,13 @@ public class AssignmentManager extends ZooKeeperListener {
boolean retainAssignment = master.getConfiguration().
getBoolean("hbase.master.startup.retainassign", true);
- Map<HServerInfo, List<HRegionInfo>> bulkPlan = null;
+ Map<ServerName, List<HRegionInfo>> bulkPlan = null;
if (retainAssignment) {
// Reuse existing assignment info
bulkPlan = LoadBalancer.retainAssignment(allRegions, servers);
} else {
// assign regions in round-robin fashion
- assignUserRegions(allRegions.keySet().toArray(new HRegionInfo[allRegions.size()]), servers);
+ assignUserRegions(new ArrayList<HRegionInfo>(allRegions.keySet()), servers);
return;
}
LOG.info("Bulk assigning " + allRegions.size() + " region(s) across " +
@@ -1397,11 +1423,11 @@ public class AssignmentManager extends ZooKeeperListener {
* Run bulk assign on startup.
*/
static class BulkStartupAssigner extends BulkAssigner {
- private final Map<HServerInfo, List<HRegionInfo>> bulkPlan;
+ private final Map<ServerName, List<HRegionInfo>> bulkPlan;
private final AssignmentManager assignmentManager;
BulkStartupAssigner(final Server server,
- final Map<HServerInfo, List<HRegionInfo>> bulkPlan,
+ final Map<ServerName, List<HRegionInfo>> bulkPlan,
final AssignmentManager am) {
super(server);
this.bulkPlan = bulkPlan;
@@ -1427,7 +1453,7 @@ public class AssignmentManager extends ZooKeeperListener {
@Override
protected void populatePool(java.util.concurrent.ExecutorService pool) {
- for (Map.Entry<HServerInfo, List<HRegionInfo>> e: this.bulkPlan.entrySet()) {
+ for (Map.Entry<ServerName, List<HRegionInfo>> e: this.bulkPlan.entrySet()) {
pool.execute(new SingleServerBulkAssigner(e.getKey(), e.getValue(),
this.assignmentManager));
}
@@ -1443,11 +1469,11 @@ public class AssignmentManager extends ZooKeeperListener {
* Manage bulk assigning to a server.
*/
static class SingleServerBulkAssigner implements Runnable {
- private final HServerInfo regionserver;
+ private final ServerName regionserver;
private final List<HRegionInfo> regions;
private final AssignmentManager assignmentManager;
- SingleServerBulkAssigner(final HServerInfo regionserver,
+ SingleServerBulkAssigner(final ServerName regionserver,
final List<HRegionInfo> regions, final AssignmentManager am) {
this.regionserver = regionserver;
this.regions = regions;
@@ -1494,28 +1520,26 @@ public class AssignmentManager extends ZooKeeperListener {
* in META
* @throws IOException
*/
- private Map<HServerInfo, List<Pair<HRegionInfo, Result>>> rebuildUserRegions()
+ Map<ServerName, List<Pair<HRegionInfo, Result>>> rebuildUserRegions()
throws IOException {
// Region assignment from META
- List<Result> results = MetaReader.fullScanOfResults(catalogTracker);
+ List<Result> results = MetaReader.fullScanOfResults(this.catalogTracker);
// Map of offline servers and their regions to be returned
- Map<HServerInfo, List<Pair<HRegionInfo, Result>>> offlineServers =
- new TreeMap<HServerInfo, List<Pair<HRegionInfo, Result>>>();
+ Map<ServerName, List<Pair<HRegionInfo, Result>>> offlineServers =
+ new TreeMap<ServerName, List<Pair<HRegionInfo, Result>>>();
// Iterate regions in META
for (Result result : results) {
- Pair<HRegionInfo, HServerInfo> region =
- MetaReader.metaRowToRegionPairWithInfo(result);
+ Pair<HRegionInfo, ServerName> region = MetaReader.metaRowToRegionPair(result);
if (region == null) continue;
- HServerInfo regionLocation = region.getSecond();
HRegionInfo regionInfo = region.getFirst();
+ ServerName regionLocation = region.getSecond();
if (regionLocation == null) {
// Region not being served, add to region map with no assignment
// If this needs to be assigned out, it will also be in ZK as RIT
this.regions.put(regionInfo, null);
- } else if (!serverManager.isServerOnline(
- regionLocation.getServerName())) {
+ } else if (!this.serverManager.isServerOnline(regionLocation)) {
// Region is located on a server that isn't online
- List<Pair<HRegionInfo, Result>> offlineRegions =
+ List<Pair<HRegionInfo, Result>> offlineRegions =
offlineServers.get(regionLocation);
if (offlineRegions == null) {
offlineRegions = new ArrayList<Pair<HRegionInfo, Result>>(1);
@@ -1524,7 +1548,7 @@ public class AssignmentManager extends ZooKeeperListener {
offlineRegions.add(new Pair<HRegionInfo, Result>(regionInfo, result));
} else {
// Region is being served and on an active server
- regions.put(regionInfo, regionLocation);
+ this.regions.put(regionInfo, regionLocation);
addToServers(regionLocation, regionInfo);
}
}
@@ -1545,9 +1569,9 @@ public class AssignmentManager extends ZooKeeperListener {
* @throws KeeperException
*/
private void processDeadServers(
- Map<HServerInfo, List<Pair<HRegionInfo, Result>>> deadServers)
+ Map<ServerName, List<Pair<HRegionInfo, Result>>> deadServers)
throws IOException, KeeperException {
- for (Map.Entry<HServerInfo, List<Pair<HRegionInfo, Result>>> deadServer :
+ for (Map.Entry<ServerName, List<Pair<HRegionInfo, Result>>> deadServer:
deadServers.entrySet()) {
List<Pair<HRegionInfo, Result>> regions = deadServer.getValue();
for (Pair<HRegionInfo, Result> region : regions) {
@@ -1556,7 +1580,7 @@ public class AssignmentManager extends ZooKeeperListener {
// If region was in transition (was in zk) force it offline for reassign
try {
ZKAssign.createOrForceNodeOffline(watcher, regionInfo,
- master.getServerName());
+ this.master.getServerName());
} catch (KeeperException.NoNodeException nne) {
// This is fine
}
@@ -1572,11 +1596,11 @@ public class AssignmentManager extends ZooKeeperListener {
* @param hsi
* @param hri
*/
- private void addToServers(final HServerInfo hsi, final HRegionInfo hri) {
- List<HRegionInfo> hris = servers.get(hsi);
+ private void addToServers(final ServerName sn, final HRegionInfo hri) {
+ List<HRegionInfo> hris = servers.get(sn);
if (hris == null) {
hris = new ArrayList<HRegionInfo>();
- servers.put(hsi, hris);
+ servers.put(sn, hris);
}
hris.add(hri);
}
@@ -1789,7 +1813,7 @@ public class AssignmentManager extends ZooKeeperListener {
try {
data = new RegionTransitionData(
EventType.M_ZK_REGION_OFFLINE, regionInfo.getRegionName(),
- master.getServerName());
+ master.getServerName());
if (ZKUtil.setData(watcher, node, data.getBytes(),
stat.getVersion())) {
// Node is now OFFLINE, let's trigger another assignment
@@ -1854,16 +1878,16 @@ public class AssignmentManager extends ZooKeeperListener {
/**
* Process shutdown server removing any assignments.
- * @param hsi Server that went down.
+ * @param sn Server that went down.
* @return list of regions in transition on this server
*/
- public List<RegionState> processServerShutdown(final HServerInfo hsi) {
+ public List<RegionState> processServerShutdown(final ServerName sn) {
// Clean out any existing assignment plans for this server
synchronized (this.regionPlans) {
for (Iterator <Map.Entry<String, RegionPlan>> i =
this.regionPlans.entrySet().iterator(); i.hasNext();) {
Map.Entry<String, RegionPlan> e = i.next();
- if (e.getValue().getDestination().equals(hsi)) {
+ if (e.getValue().getDestination().equals(sn)) {
// Use iterator's remove else we'll get CME
i.remove();
}
@@ -1875,7 +1899,7 @@ public class AssignmentManager extends ZooKeeperListener {
Set<HRegionInfo> deadRegions = null;
List<RegionState> rits = new ArrayList<RegionState>();
synchronized (this.regions) {
- List<HRegionInfo> assignedRegions = this.servers.remove(hsi);
+ List<HRegionInfo> assignedRegions = this.servers.remove(sn);
if (assignedRegions == null || assignedRegions.isEmpty()) {
// No regions on this server, we are done, return empty list of RITs
return rits;
@@ -1900,16 +1924,16 @@ public class AssignmentManager extends ZooKeeperListener {
/**
* Update inmemory structures.
- * @param hsi Server that reported the split
+ * @param sn Server that reported the split
* @param parent Parent region that was split
* @param a Daughter region A
* @param b Daughter region B
*/
- public void handleSplitReport(final HServerInfo hsi, final HRegionInfo parent,
+ public void handleSplitReport(final ServerName sn, final HRegionInfo parent,
final HRegionInfo a, final HRegionInfo b) {
regionOffline(parent);
- regionOnline(a, hsi);
- regionOnline(b, hsi);
+ regionOnline(a, sn);
+ regionOnline(b, sn);
// There's a possibility that the region was splitting while a user asked
// the master to disable, we need to make sure we close those regions in
@@ -1927,21 +1951,16 @@ public class AssignmentManager extends ZooKeeperListener {
* If a new server has come in and it has no regions, it will not be included
* in the returned Map.
*/
- Map<HServerInfo, List<HRegionInfo>> getAssignments() {
+ Map<ServerName, List<HRegionInfo>> getAssignments() {
// This is an EXPENSIVE clone. Cloning though is the safest thing to do.
// Can't let out original since it can change and at least the loadbalancer
// wants to iterate this exported list. We need to synchronize on regions
// since all access to this.servers is under a lock on this.regions.
- Map<HServerInfo, List<HRegionInfo>> result = null;
+ Map<ServerName, List<HRegionInfo>> result = null;
synchronized (this.regions) {
- result = new HashMap<HServerInfo, List<HRegionInfo>>(this.servers.size());
- for (Map.Entry<HServerInfo, List<HRegionInfo>> e: this.servers.entrySet()) {
- List<HRegionInfo> shallowCopy = new ArrayList<HRegionInfo>(e.getValue());
- HServerInfo clone = new HServerInfo(e.getKey());
- // Set into server load the number of regions this server is carrying
- // The load balancer calculation needs it at least and its handy.
- clone.getLoad().setNumberOfRegions(e.getValue().size());
- result.put(clone, shallowCopy);
+ result = new HashMap<ServerName, List<HRegionInfo>>(this.servers.size());
+ for (Map.Entry<ServerName, List<HRegionInfo>> e: this.servers.entrySet()) {
+ result.put(e.getKey(), new ArrayList<HRegionInfo>(e.getValue()));
}
}
return result;
@@ -1950,14 +1969,14 @@ public class AssignmentManager extends ZooKeeperListener {
/**
* @param encodedRegionName Region encoded name.
* @return Null or a {@link Pair} instance that holds the full {@link HRegionInfo}
- * and the hosting servers {@link HServerInfo}.
+ * and the hosting servers {@link ServerName}.
*/
- Pair<HRegionInfo, HServerInfo> getAssignment(final byte [] encodedRegionName) {
+ Pair<HRegionInfo, ServerName> getAssignment(final byte [] encodedRegionName) {
String name = Bytes.toString(encodedRegionName);
synchronized(this.regions) {
- for (Map.Entry<HRegionInfo, HServerInfo> e: this.regions.entrySet()) {
+ for (Map.Entry<HRegionInfo, ServerName> e: this.regions.entrySet()) {
if (e.getKey().getEncodedName().equals(name)) {
- return new Pair<HRegionInfo, HServerInfo>(e.getKey(), e.getValue());
+ return new Pair<HRegionInfo, ServerName>(e.getKey(), e.getValue());
}
}
}
@@ -1975,28 +1994,12 @@ public class AssignmentManager extends ZooKeeperListener {
}
/**
- * @param hsi
- * @return True if this server is carrying a catalog region, a region from
- * -ROOT- or .META. table.
- */
- boolean isMetaRegionServer(final HServerInfo hsi) {
- synchronized (this.regions) {
- List<HRegionInfo> regions = this.servers.get(hsi);
- if (regions == null || regions.isEmpty()) return false;
- for (HRegionInfo hri: regions) {
- if (hri.isMetaRegion()) return true;
- }
- }
- return false;
- }
-
- /**
* Run through remaining regionservers and unassign all catalog regions.
*/
void unassignCatalogRegions() {
this.servers.entrySet();
synchronized (this.regions) {
- for (Map.Entry<HServerInfo, List<HRegionInfo>> e: this.servers.entrySet()) {
+ for (Map.Entry<ServerName, List<HRegionInfo>> e: this.servers.entrySet()) {
List<HRegionInfo> regions = e.getValue();
if (regions == null || regions.isEmpty()) continue;
for (HRegionInfo hri: regions) {
diff --git a/src/main/java/org/apache/hadoop/hbase/master/DeadServer.java b/src/main/java/org/apache/hadoop/hbase/master/DeadServer.java
index efcbb99..38f78ad 100644
--- a/src/main/java/org/apache/hadoop/hbase/master/DeadServer.java
+++ b/src/main/java/org/apache/hadoop/hbase/master/DeadServer.java
@@ -27,7 +27,7 @@ import java.util.List;
import java.util.Set;
import org.apache.commons.lang.NotImplementedException;
-import org.apache.hadoop.hbase.HServerInfo;
+import org.apache.hadoop.hbase.ServerName;
/**
* Class to hold dead servers list and utility querying dead server list.
@@ -58,7 +58,7 @@ public class DeadServer implements Set<String> {
}
/**
- * @param serverName
+ * @param serverName Server name
* @return true if server is dead
*/
public boolean isDeadServer(final String serverName) {
@@ -74,7 +74,7 @@ public class DeadServer implements Set<String> {
* @return true if server is dead
*/
boolean isDeadServer(final String serverName, final boolean hostAndPortOnly) {
- return HServerInfo.isServer(this, serverName, hostAndPortOnly);
+ return ServerName.isServer(this, serverName, hostAndPortOnly);
}
/**
diff --git a/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 62789d3..f20e4bf 100644
--- a/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -23,8 +23,8 @@ import java.io.IOException;
import java.lang.reflect.Constructor;
import java.lang.reflect.InvocationTargetException;
import java.net.InetSocketAddress;
-import java.net.UnknownHostException;
import java.util.ArrayList;
+import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.concurrent.atomic.AtomicReference;
@@ -36,14 +36,13 @@ import org.apache.hadoop.hbase.Chore;
import org.apache.hadoop.hbase.ClusterStatus;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HMsg;
import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HServerAddress;
-import org.apache.hadoop.hbase.HServerInfo;
+import org.apache.hadoop.hbase.HServerLoad;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MasterNotRunningException;
import org.apache.hadoop.hbase.NotAllMetaRegionsOnlineException;
import org.apache.hadoop.hbase.Server;
+import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableExistsException;
import org.apache.hadoop.hbase.TableNotDisabledException;
import org.apache.hadoop.hbase.TableNotFoundException;
@@ -54,8 +53,8 @@ import org.apache.hadoop.hbase.catalog.MetaReader;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.HConnectionManager;
import org.apache.hadoop.hbase.client.MetaScanner;
-import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitor;
+import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.executor.ExecutorService;
import org.apache.hadoop.hbase.executor.ExecutorService.ExecutorType;
import org.apache.hadoop.hbase.ipc.HBaseRPC;
@@ -131,8 +130,6 @@ implements HMasterInterface, HMasterRegionInterface, MasterServices, Server {
// RPC server for the HMaster
private final RpcServer rpcServer;
- // Address of the HMaster
- private final HServerAddress address;
// Metrics for the HMaster
private final MasterMetrics metrics;
// file system manager for the master FS operations
@@ -172,6 +169,7 @@ implements HMasterInterface, HMasterRegionInterface, MasterServices, Server {
private LogCleaner logCleaner;
private MasterCoprocessorHost cpHost;
+ private final ServerName serverName;
/**
* Initializes the HMaster. The steps are as follows:
@@ -189,43 +187,50 @@ implements HMasterInterface, HMasterRegionInterface, MasterServices, Server {
throws IOException, KeeperException, InterruptedException {
this.conf = conf;
- /*
- * Determine address and initialize RPC server (but do not start).
- * The RPC server ports can be ephemeral. Create a ZKW instance.
- */
- HServerAddress a = new HServerAddress(getMyAddress(this.conf));
- int numHandlers = conf.getInt("hbase.regionserver.handler.count", 10);
+ // Server to handle client requests.
+ String hostname = DNS.getDefaultHost(
+ conf.get("hbase.master.dns.interface", "default"),
+ conf.get("hbase.master.dns.nameserver", "default"));
+ int port = conf.getInt(HConstants.MASTER_PORT, HConstants.DEFAULT_MASTER_PORT);
+ // Creation of a HSA will force a resolve.
+ InetSocketAddress isa = new InetSocketAddress(hostname, port);
+ if (isa.getAddress() == null) {
+ throw new IllegalArgumentException("Failed resolve of " + isa);
+ }
+ int numHandlers = conf.getInt("hbase.master.handler.count",
+ conf.getInt("hbase.regionserver.handler.count", 25));
this.rpcServer = HBaseRPC.getServer(this,
new Class<?>[]{HMasterInterface.class, HMasterRegionInterface.class},
- a.getBindAddress(), a.getPort(),
- numHandlers,
- 0, // we dont use high priority handlers in master
- false, conf,
- 0); // this is a DNC w/o high priority handlers
- this.address = new HServerAddress(rpcServer.getListenerAddress());
+ isa.getHostName(), // BindAddress is IP we got for this server.
+ isa.getPort(),
+ numHandlers,
+ 0, // we dont use high priority handlers in master
+ false, conf,
+ 0); // this is a DNC w/o high priority handlers
+ // Update isa with what is in rpc; it may have changed the port; e.g. if
+ // we were asked bind to port 0.
+ isa = this.rpcServer.getListenerAddress();
// initialize server principal (if using secure Hadoop)
User.login(conf, "hbase.master.keytab.file",
- "hbase.master.kerberos.principal", this.address.getHostname());
+ "hbase.master.kerberos.principal", isa.getHostName());
// set the thread name now we have an address
- setName(MASTER + "-" + this.address);
+ setName(MASTER + "-" + isa.toString());
Replication.decorateMasterConfiguration(this.conf);
-
this.rpcServer.startThreads();
+ this.serverName = new ServerName(isa.getHostName(), isa.getPort(),
+ System.currentTimeMillis());
// Hack! Maps DFSClient => Master for logs. HDFS made this
// config param for task trackers, but we can piggyback off of it.
if (this.conf.get("mapred.task.id") == null) {
- this.conf.set("mapred.task.id", "hb_m_" + this.address.toString() +
+ this.conf.set("mapred.task.id", "hb_m_" + this.serverName.toString() +
"_" + System.currentTimeMillis());
}
-
- this.zooKeeper = new ZooKeeperWatcher(conf, MASTER + ":" +
- address.getPort(), this);
-
- this.metrics = new MasterMetrics(getServerName());
+ this.zooKeeper = new ZooKeeperWatcher(conf, MASTER + ":" + isa.getPort(), this);
+ this.metrics = new MasterMetrics(getServerName().toString());
}
/**
@@ -276,7 +281,8 @@ implements HMasterInterface, HMasterRegionInterface, MasterServices, Server {
* now wait until it dies to try and become the next active master. If we
* do not succeed on our first attempt, this is no longer a cluster startup.
*/
- this.activeMasterManager = new ActiveMasterManager(zooKeeper, address, this);
+ this.activeMasterManager =
+ new ActiveMasterManager(zooKeeper, this.serverName, this);
this.zooKeeper.registerListener(activeMasterManager);
stallIfBackupMaster(this.conf, this.activeMasterManager);
this.activeMasterManager.blockUntilBecomingActiveMaster();
@@ -352,9 +358,9 @@ implements HMasterInterface, HMasterRegionInterface, MasterServices, Server {
fileSystemManager.getClusterId());
this.connection = HConnectionManager.getConnection(conf);
- this.executorService = new ExecutorService(getServerName());
+ this.executorService = new ExecutorService(getServerName().toString());
- this.serverManager = new ServerManager(this, this, metrics);
+ this.serverManager = new ServerManager(this, this);
this.catalogTracker = new CatalogTracker(this.zooKeeper, this.connection,
this, conf.getInt("hbase.master.catalog.timeout", Integer.MAX_VALUE));
@@ -376,7 +382,7 @@ implements HMasterInterface, HMasterRegionInterface, MasterServices, Server {
boolean wasUp = this.clusterStatusTracker.isClusterUp();
if (!wasUp) this.clusterStatusTracker.setClusterUp();
- LOG.info("Server active/primary master; " + this.address +
+ LOG.info("Server active/primary master; " + this.serverName.toString() +
", sessionid=0x" +
Long.toHexString(this.zooKeeper.getZooKeeper().getSessionId()) +
", cluster-up flag was=" + wasUp);
@@ -387,29 +393,25 @@ implements HMasterInterface, HMasterRegionInterface, MasterServices, Server {
// start up all service threads.
startServiceThreads();
- // Wait for region servers to report in. Returns count of regions.
- int regionCount = this.serverManager.waitForRegionServers();
+ // Wait for region servers to report in.
+ this.serverManager.waitForRegionServers();
+ // Check zk for regionservers that are up but didn't register
+ for (ServerName sn: this.regionServerTracker.getOnlineServers()) {
+ if (!this.serverManager.isServerOnline(sn)) {
+ // Not registered; add it.
+ LOG.info("Registering server found up in zk: " + sn);
+ this.serverManager.recordNewServer(sn, HServerLoad.EMPTY_HSERVERLOAD);
+ }
+ }
// TODO: Should do this in background rather than block master startup
this.fileSystemManager.
- splitLogAfterStartup(this.serverManager.getOnlineServers());
+ splitLogAfterStartup(this.serverManager.getOnlineServers().keySet());
// Make sure root and meta assigned before proceeding.
assignRootAndMeta();
-
- // Is this fresh start with no regions assigned or are we a master joining
- // an already-running cluster? If regionsCount == 0, then for sure a
- // fresh start. TOOD: Be fancier. If regionsCount == 2, perhaps the
- // 2 are .META. and -ROOT- and we should fall into the fresh startup
- // branch below. For now, do processFailover.
- if (regionCount == 0) {
- LOG.info("Master startup proceeding: cluster startup");
- this.assignmentManager.cleanoutUnassigned();
- this.assignmentManager.assignAllUserRegions();
- } else {
- LOG.info("Master startup proceeding: master failover");
- this.assignmentManager.processFailover();
- }
+ // Fixup assignment manager status
+ this.assignmentManager.joinCluster();
// Start balancer and meta catalog janitor after meta and regions have
// been assigned.
@@ -444,7 +446,7 @@ implements HMasterInterface, HMasterRegionInterface, MasterServices, Server {
} else {
// Region already assigned. We didnt' assign it. Add to in-memory state.
this.assignmentManager.regionOnline(HRegionInfo.ROOT_REGIONINFO,
- this.serverManager.getHServerInfo(this.catalogTracker.getRootLocation()));
+ this.catalogTracker.getRootLocation());
}
LOG.info("-ROOT- assigned=" + assigned + ", rit=" + rit +
", location=" + catalogTracker.getRootLocation());
@@ -462,32 +464,13 @@ implements HMasterInterface, HMasterRegionInterface, MasterServices, Server {
} else {
// Region already assigned. We didnt' assign it. Add to in-memory state.
this.assignmentManager.regionOnline(HRegionInfo.FIRST_META_REGIONINFO,
- this.serverManager.getHServerInfo(this.catalogTracker.getMetaLocation()));
+ this.catalogTracker.getMetaLocation());
}
LOG.info(".META. assigned=" + assigned + ", rit=" + rit +
", location=" + catalogTracker.getMetaLocation());
return assigned;
}
- /*
- * @return This masters' address.
- * @throws UnknownHostException
- */
- private static String getMyAddress(final Configuration c)
- throws UnknownHostException {
- // Find out our address up in DNS.
- String s = DNS.getDefaultHost(c.get("hbase.master.dns.interface","default"),
- c.get("hbase.master.dns.nameserver","default"));
- s += ":" + c.get(HConstants.MASTER_PORT,
- Integer.toString(HConstants.DEFAULT_MASTER_PORT));
- return s;
- }
-
- /** @return HServerAddress of the master server */
- public HServerAddress getMasterAddress() {
- return this.address;
- }
-
public long getProtocolVersion(String protocol, long clientVersion) {
if (HMasterInterface.class.getName().equals(protocol)) {
return HMasterInterface.VERSION;
@@ -627,25 +610,22 @@ implements HMasterInterface, HMasterRegionInterface, MasterServices, Server {
}
@Override
- public MapWritable regionServerStartup(final HServerInfo serverInfo,
- final long serverCurrentTime)
+ public MapWritable regionServerStartup(final int port,
+ final long serverStartCode, final long serverCurrentTime)
throws IOException {
- // Set the ip into the passed in serverInfo. Its ip is more than likely
- // not the ip that the master sees here. See at end of this method where
- // we pass it back to the regionserver by setting "hbase.regionserver.address"
- // Everafter, the HSI combination 'server name' is what uniquely identifies
- // the incoming RegionServer.
- InetSocketAddress address = new InetSocketAddress(
- HBaseServer.getRemoteIp().getHostName(),
- serverInfo.getServerAddress().getPort());
- serverInfo.setServerAddress(new HServerAddress(address));
-
+ // This call to InetSocketAddress will resolve the hostname to an IP.
+ InetSocketAddress isa = new InetSocketAddress(
+ HBaseServer.getRemoteIp().getHostName(), port);
+ if (isa.isUnresolved()) {
+ LOG.warn("Failed resolve of " + isa);
+ }
// Register with server manager
- this.serverManager.regionServerStartup(serverInfo, serverCurrentTime);
+ this.serverManager.regionServerStartup(isa, serverStartCode,
+ serverCurrentTime);
// Send back some config info
MapWritable mw = createConfigurationSubset();
- mw.put(new Text("hbase.regionserver.address"),
- serverInfo.getServerAddress());
+ mw.put(new Text(HConstants.KEY_FOR_HOSTNAME_SEEN_BY_MASTER),
+ new Text(this.serverName.getHostname()));
return mw;
}
@@ -664,23 +644,13 @@ implements HMasterInterface, HMasterRegionInterface, MasterServices, Server {
}
@Override
- public HMsg [] regionServerReport(HServerInfo serverInfo, HMsg msgs[],
- HRegionInfo[] mostLoadedRegions)
- throws IOException {
- return adornRegionServerAnswer(serverInfo,
- this.serverManager.regionServerReport(serverInfo, msgs, mostLoadedRegions));
- }
-
- /**
- * Override if you'd add messages to return to regionserver hsi
- * or to send an exception.
- * @param msgs Messages to add to
- * @return Messages to return to
- * @throws IOException exceptions that were injected for the region servers
- */
- protected HMsg [] adornRegionServerAnswer(final HServerInfo hsi,
- final HMsg [] msgs) throws IOException {
- return msgs;
+ public void regionServerReport(ServerName sn, HServerLoad hsl)
+ throws IOException {
+ this.serverManager.regionServerReport(sn, hsl);
+ if (hsl != null && this.metrics != null) {
+ // Up our metrics.
+ this.metrics.incrementRequests(hsl.getNumberOfRequests());
+ }
}
public boolean isMasterRunning() {
@@ -740,14 +710,13 @@ implements HMasterInterface, HMasterRegionInterface, MasterServices, Server {
}
}
- Map<HServerInfo, List<HRegionInfo>> assignments =
+ Map<ServerName, List<HRegionInfo>> assignments =
this.assignmentManager.getAssignments();
// Returned Map from AM does not include mention of servers w/o assignments.
- for (Map.Entry<String, HServerInfo> e:
+ for (Map.Entry<ServerName, HServerLoad> e:
this.serverManager.getOnlineServers().entrySet()) {
- HServerInfo hsi = e.getValue();
- if (!assignments.containsKey(hsi)) {
- assignments.put(hsi, new ArrayList<HRegionInfo>());
+ if (!assignments.containsKey(e.getKey())) {
+ assignments.put(e.getKey(), new ArrayList<HRegionInfo>());
}
}
List<RegionPlan> plans = this.balancer.balanceCluster(assignments);
@@ -814,12 +783,12 @@ implements HMasterInterface, HMasterRegionInterface, MasterServices, Server {
@Override
public void move(final byte[] encodedRegionName, final byte[] destServerName)
throws UnknownRegionException {
- Pair<HRegionInfo, HServerInfo> p =
+ Pair<HRegionInfo, ServerName> p =
this.assignmentManager.getAssignment(encodedRegionName);
if (p == null)
throw new UnknownRegionException(Bytes.toString(encodedRegionName));
HRegionInfo hri = p.getFirst();
- HServerInfo dest = null;
+ ServerName dest = null;
if (destServerName == null || destServerName.length == 0) {
LOG.info("Passed destination servername is null/empty so " +
"choosing a server at random");
@@ -827,12 +796,12 @@ implements HMasterInterface, HMasterRegionInterface, MasterServices, Server {
// Unassign will reassign it elsewhere choosing random server.
this.assignmentManager.unassign(hri);
} else {
- dest = this.serverManager.getServerInfo(new String(destServerName));
-
+ dest = new ServerName(Bytes.toString(destServerName));
if (this.cpHost != null) {
this.cpHost.preMove(p.getFirst(), p.getSecond(), dest);
}
RegionPlan rp = new RegionPlan(p.getFirst(), p.getSecond(), dest);
+ LOG.info("Added move plan " + rp + ", running balancer");
this.assignmentManager.balance(rp);
if (this.cpHost != null) {
this.cpHost.postMove(p.getFirst(), p.getSecond(), dest);
@@ -911,9 +880,9 @@ implements HMasterInterface, HMasterRegionInterface, MasterServices, Server {
}
// 5. Trigger immediate assignment of the regions in round-robin fashion
- List<HServerInfo> servers = serverManager.getOnlineServersList();
+ List<ServerName> servers = serverManager.getOnlineServersList();
try {
- this.assignmentManager.assignUserRegions(newRegions, servers);
+ this.assignmentManager.assignUserRegions(Arrays.asList(newRegions), servers);
} catch (InterruptedException ie) {
LOG.error("Caught " + ie + " during round-robin assignment");
throw new IOException(ie);
@@ -1015,11 +984,11 @@ implements HMasterInterface, HMasterRegionInterface, MasterServices, Server {
* is found, but not currently deployed, the second element of the pair
* may be null.
*/
- Pair<HRegionInfo, HServerAddress> getTableRegionForRow(
+ Pair<HRegionInfo, ServerName> getTableRegionForRow(
final byte [] tableName, final byte [] rowKey)
throws IOException {
- final AtomicReference<Pair<HRegionInfo, HServerAddress>> result =
- new AtomicReference<Pair<HRegionInfo, HServerAddress>>(null);
+ final AtomicReference<Pair<HRegionInfo, ServerName>> result =
+ new AtomicReference<Pair<HRegionInfo, ServerName>>(null);
MetaScannerVisitor visitor =
new MetaScannerVisitor() {
@@ -1028,13 +997,11 @@ implements HMasterInterface, HMasterRegionInterface, MasterServices, Server {
if (data == null || data.size() <= 0) {
return true;
}
- Pair