diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java index acc4a05..2ecd4ac 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java @@ -1345,6 +1345,17 @@ public static final String YARN_HTTP_POLICY_KEY = YARN_PREFIX + "http.policy"; public static final String YARN_HTTP_POLICY_DEFAULT = HttpConfig.Policy.HTTP_ONLY .name(); + + public static final String NODE_LABELS_PREFIX = YARN_PREFIX + "node-labels."; + + /** URI for NodeLabelManager */ + public static final String FS_NODE_LABELS_STORE_URI = NODE_LABELS_PREFIX + + "fs-store.uri"; + public static final String DEFAULT_FS_NODE_LABELS_STORE_URI = "file:///tmp/"; + public static final String FS_NODE_LABELS_STORE_RETRY_POLICY_SPEC = + NODE_LABELS_PREFIX + "fs-store.retry-policy-spec"; + public static final String DEFAULT_FS_NODE_LABELS_STORE_RETRY_POLICY_SPEC = + "2000, 500"; public YarnConfiguration() { super(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/FileSystemNodeLabelsStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/FileSystemNodeLabelsStore.java new file mode 100644 index 0000000..694970d --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/FileSystemNodeLabelsStore.java @@ -0,0 +1,254 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
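A minimal sketch of how the two new YarnConfiguration keys introduced above might be set by a caller; the wrapper class, main() method and the HDFS URI are illustrative assumptions, not part of the patch:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.yarn.conf.YarnConfiguration;

    public class NodeLabelsStoreConfigSketch {
      public static void main(String[] args) {
        Configuration conf = new YarnConfiguration();
        // Where FileSystemNodeLabelsStore keeps its mirror and edit log;
        // the patch defaults this to file:///tmp/.
        conf.set(YarnConfiguration.FS_NODE_LABELS_STORE_URI,
            "hdfs:///yarn/node-labels");
        // Retry policy the store copies into dfs.client.retry.policy.spec;
        // the patch's default value is "2000, 500".
        conf.set(YarnConfiguration.FS_NODE_LABELS_STORE_RETRY_POLICY_SPEC,
            "2000, 500");
      }
    }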
+ */ + +package org.apache.hadoop.yarn.nodelabels; + +import java.io.EOFException; +import java.io.IOException; +import java.util.Collection; +import java.util.Map; +import java.util.Set; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.LocalFileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.AddLabelsRequestProto; +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RemoveLabelsRequestProto; +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.SetNodeToLabelsRequestProto; +import org.apache.hadoop.yarn.server.api.protocolrecords.AddLabelsRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.RemoveLabelsRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.SetNodeToLabelsRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.AddLabelsRequestPBImpl; +import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RemoveLabelsRequestPBImpl; +import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.SetNodeToLabelsRequestPBImpl; + +import com.google.common.collect.Sets; + +public class FileSystemNodeLabelsStore extends NodeLabelsStore { + + public FileSystemNodeLabelsStore(NodeLabelsManager mgr) { + super(mgr); + } + + protected static final Log LOG = LogFactory.getLog(FileSystemNodeLabelsStore.class); + + protected static final String ROOT_DIR_NAME = "FSNodeLabelManagerRoot"; + protected static final String MIRROR_FILENAME = "nodelabel.mirror"; + protected static final String EDITLOG_FILENAME = "nodelabel.editlog"; + + protected enum SerializedLogType { + ADD_LABELS, NODE_TO_LABELS, REMOVE_LABELS + } + + Path fsWorkingPath; + Path rootDirPath; + FileSystem fs; + FSDataOutputStream editlogOs; + Path editLogPath; + + @Override + public void init(Configuration conf) throws Exception { + fsWorkingPath = + new Path(conf.get(YarnConfiguration.FS_NODE_LABELS_STORE_URI, + YarnConfiguration.DEFAULT_FS_NODE_LABELS_STORE_URI)); + rootDirPath = new Path(fsWorkingPath, ROOT_DIR_NAME); + + setFileSystem(conf); + + // mkdir of root dir path + fs.mkdirs(rootDirPath); + } + + @Override + public void finalize() throws Exception { + try { + fs.close(); + editlogOs.close(); + } catch (Exception e) { + LOG.warn("Exception happened whiling shutting down,", e); + } + } + + private void setFileSystem(Configuration conf) throws IOException { + Configuration confCopy = new Configuration(conf); + confCopy.setBoolean("dfs.client.retry.policy.enabled", true); + String retryPolicy = + confCopy.get(YarnConfiguration.FS_NODE_LABELS_STORE_RETRY_POLICY_SPEC, + YarnConfiguration.DEFAULT_FS_NODE_LABELS_STORE_RETRY_POLICY_SPEC); + confCopy.set("dfs.client.retry.policy.spec", retryPolicy); + fs = fsWorkingPath.getFileSystem(confCopy); + + // if it's local file system, use RawLocalFileSystem instead of + // LocalFileSystem, the latter one doesn't support append. 
+ if (fs.getScheme().equals("file")) { + fs = ((LocalFileSystem)fs).getRaw(); + } + } + + private void ensureAppendEditlogFile() throws IOException { + editlogOs = fs.append(editLogPath); + } + + private void ensureCloseEditlogFile() throws IOException { + editlogOs.close(); + } + + @Override + public void persistNodeToLabelsChanges( + Map> nodeToLabels) throws IOException { + ensureAppendEditlogFile(); + editlogOs.writeInt(SerializedLogType.NODE_TO_LABELS.ordinal()); + ((SetNodeToLabelsRequestPBImpl) SetNodeToLabelsRequest + .newInstance(nodeToLabels)).getProto().writeDelimitedTo(editlogOs); + ensureCloseEditlogFile(); + } + + @Override + public void persistAddingLabels(Set labels) + throws IOException { + ensureAppendEditlogFile(); + editlogOs.writeInt(SerializedLogType.ADD_LABELS.ordinal()); + ((AddLabelsRequestPBImpl) AddLabelsRequest.newInstance(labels)).getProto() + .writeDelimitedTo(editlogOs); + ensureCloseEditlogFile(); + } + + @Override + public void persistRemovingLabels(Collection labels) + throws IOException { + ensureAppendEditlogFile(); + editlogOs.writeInt(SerializedLogType.REMOVE_LABELS.ordinal()); + ((RemoveLabelsRequestPBImpl) RemoveLabelsRequest.newInstance(Sets + .newHashSet(labels.iterator()))).getProto().writeDelimitedTo(editlogOs); + ensureCloseEditlogFile(); + } + + @Override + public void recover() throws IOException { + /* + * Steps of recover + * 1) Read from last mirror (from mirror or mirror.old) + * 2) Read from last edit log, and apply such edit log + * 3) Write new mirror to mirror.writing + * 4) Rename mirror to mirror.old + * 5) Move mirror.writing to mirror + * 6) Remove mirror.old + * 7) Remove edit log and create a new empty edit log + */ + + // Open mirror from serialized file + Path mirrorPath = new Path(rootDirPath, MIRROR_FILENAME); + Path oldMirrorPath = new Path(rootDirPath, MIRROR_FILENAME + ".old"); + + FSDataInputStream is = null; + if (fs.exists(mirrorPath)) { + is = fs.open(mirrorPath); + } else if (fs.exists(oldMirrorPath)) { + is = fs.open(oldMirrorPath); + } + + if (null != is) { + Set labels = + new AddLabelsRequestPBImpl( + AddLabelsRequestProto.parseDelimitedFrom(is)).getLabels(); + Map> nodeToLabels = + new SetNodeToLabelsRequestPBImpl( + SetNodeToLabelsRequestProto.parseDelimitedFrom(is)) + .getNodeToLabels(); + mgr.addNodeLabels(labels); + mgr.setLabelsOnMultipleNodes(nodeToLabels); + is.close(); + } + + // Open and process editlog + editLogPath = new Path(rootDirPath, EDITLOG_FILENAME); + if (fs.exists(editLogPath)) { + is = fs.open(editLogPath); + + while (true) { + try { + // read edit log one by one + SerializedLogType type = SerializedLogType.values()[is.readInt()]; + + switch (type) { + case ADD_LABELS: { + Collection partitions = + AddLabelsRequestProto.parseDelimitedFrom(is) + .getLabelsList(); + mgr.addNodeLabels(Sets.newHashSet(partitions.iterator())); + break; + } + case REMOVE_LABELS: { + Collection partitions = + RemoveLabelsRequestProto.parseDelimitedFrom(is) + .getLabelsList(); + mgr.removeNodeLabels(partitions); + break; + } + case NODE_TO_LABELS: { + Map> map = + new SetNodeToLabelsRequestPBImpl( + SetNodeToLabelsRequestProto.parseDelimitedFrom(is)) + .getNodeToLabels(); + mgr.setLabelsOnMultipleNodes(map); + break; + } + } + } catch (EOFException e) { + // EOF hit, break + break; + } + } + } + + // Serialize current mirror to mirror.writing + Path writingMirrorPath = new Path(rootDirPath, MIRROR_FILENAME + ".writing"); + FSDataOutputStream os = fs.create(writingMirrorPath, true); + ((AddLabelsRequestPBImpl) 
AddLabelsRequestPBImpl + .newInstance(mgr.getLabels())).getProto().writeDelimitedTo(os); + ((SetNodeToLabelsRequestPBImpl) SetNodeToLabelsRequest + .newInstance(mgr.getNodesToLabels())).getProto().writeDelimitedTo(os); + os.close(); + + // Move mirror to mirror.old + if (fs.exists(mirrorPath)) { + fs.delete(oldMirrorPath, false); + fs.rename(mirrorPath, oldMirrorPath); + } + + // move mirror.writing to mirror + fs.rename(writingMirrorPath, mirrorPath); + fs.delete(writingMirrorPath, false); + + // remove mirror.old + fs.delete(oldMirrorPath, false); + + // create a new editlog file + editlogOs = fs.create(editLogPath, true); + editlogOs.close(); + + LOG.info("Finished write mirror at:" + mirrorPath.toString()); + LOG.info("Finished create editlog file at:" + editLogPath.toString()); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/NodeLabelConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/NodeLabelConfiguration.java new file mode 100644 index 0000000..c9219c7 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/NodeLabelConfiguration.java @@ -0,0 +1,86 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.nodelabels; + +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Path; + +public class NodeLabelConfiguration extends Configuration { + public final static String PREFIX = "yarn.node-label."; + + public final static String LABELS_KEY = PREFIX + "labels"; + public final static String NODES_KEY = PREFIX + "nodes"; + + public final static String NODE_LABELS_SUFFIX = ".labels"; + + public static enum LoadStrategy { + INITIAL, REPLACE, MERGE, CLEAR + } + + public NodeLabelConfiguration(String absolutePath) { + super(false); + Path absoluteLocalPath = new Path("file", "", absolutePath); + addResource(absoluteLocalPath); + } + + public Set getLabels() { + Set labelsSet = new HashSet(); + String[] labels = getStrings(LABELS_KEY); + if (null != labels) { + for (String l : labels) { + if (l.trim().isEmpty()) { + continue; + } + labelsSet.add(l); + } + } + return labelsSet; + } + + public Map> getNodeToLabels() { + Map> nodeToLabels = new HashMap>(); + + String[] nodes = getStrings(NODES_KEY); + if (null != nodes) { + for (String n : nodes) { + if (n.trim().isEmpty()) { + continue; + } + String[] labels = getStrings(NODES_KEY + "." 
+ n + NODE_LABELS_SUFFIX); + nodeToLabels.put(n, new HashSet()); + + if (labels != null) { + for (String l : labels) { + if (l.trim().isEmpty()) { + continue; + } + nodeToLabels.get(n).add(l); + } + } + } + } + + return nodeToLabels; + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/NodeLabelUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/NodeLabelUtils.java new file mode 100644 index 0000000..342ea7d --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/NodeLabelUtils.java @@ -0,0 +1,112 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.nodelabels; + +import java.io.IOException; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; + +import com.google.gson.JsonArray; +import com.google.gson.JsonElement; +import com.google.gson.JsonObject; +import com.google.gson.JsonParseException; +import com.google.gson.JsonParser; + +public class NodeLabelUtils { + private static final String PARSE_FAILED_MSG = + "Failed to parse node-> labels json"; + private static final String LABELS_KEY = + "labels"; + + /** + * Get node to labels from JSON like: + * + * { + * "host1": { + * "labels": [ + * "x", + * "y", + * "z" + * ] + * }, + * "host2": { + * "labels": [ + * "a", + * "b", + * "c" + * ] + * }, + * "host3": { + * "labels": [] + * } + * } + * + * @param json + * @return node to labels map + */ + public static Map> getNodeToLabelsFromJson(String json) + throws IOException { + Map> nodeToLabels = new HashMap>(); + + if (json == null || json.trim().isEmpty()) { + return nodeToLabels; + } + + JsonParser parser = new JsonParser(); + JsonElement node; + try { + node = parser.parse(json); + } catch (JsonParseException e) { + throw new IOException(e); + } + + if (node.isJsonObject()) { + JsonObject obj = node.getAsJsonObject(); + for (Map.Entry entry : obj.entrySet()) { + String nodeName = entry.getKey().trim(); + if (nodeName.isEmpty()) { + throw new IOException(PARSE_FAILED_MSG); + } + nodeToLabels.put(nodeName, new HashSet()); + + if (entry.getValue().isJsonObject()) { + JsonObject labelObj = entry.getValue().getAsJsonObject(); + if (labelObj.entrySet().size() > 0) { + JsonElement labelsElement = labelObj.get(LABELS_KEY); + if (labelsElement == null || !labelsElement.isJsonArray()) { + throw new IOException(PARSE_FAILED_MSG); + } + JsonArray labelsArray = labelsElement.getAsJsonArray(); + for (JsonElement item : labelsArray) { + nodeToLabels.get(nodeName).add(item.getAsString()); + } + } + } else { + throw new IOException(PARSE_FAILED_MSG); + } + } + } else { + throw new 
IOException(PARSE_FAILED_MSG); + } + + return nodeToLabels; + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/NodeLabelsManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/NodeLabelsManager.java new file mode 100644 index 0000000..d1420b9 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/NodeLabelsManager.java @@ -0,0 +1,1009 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.nodelabels; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.locks.ReentrantReadWriteLock; +import java.util.concurrent.locks.ReentrantReadWriteLock.ReadLock; +import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock; +import java.util.regex.Pattern; + +import org.apache.commons.lang.StringUtils; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.authorize.AccessControlList; +import org.apache.hadoop.service.AbstractService; +import org.apache.hadoop.yarn.api.records.NodeId; +import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.event.AsyncDispatcher; +import org.apache.hadoop.yarn.event.Dispatcher; +import org.apache.hadoop.yarn.event.EventHandler; +import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; +import org.apache.hadoop.yarn.nodelabels.event.AddLabelsEvent; +import org.apache.hadoop.yarn.nodelabels.event.NodeLabelManagerEvent; +import org.apache.hadoop.yarn.nodelabels.event.NodeLabelManagerEventType; +import org.apache.hadoop.yarn.nodelabels.event.RemoveLabelsEvent; +import org.apache.hadoop.yarn.nodelabels.event.StoreNodeToLabelsEvent; +import org.apache.hadoop.yarn.util.resource.Resources; + +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSet; +import com.google.common.collect.Sets; + +public class NodeLabelsManager extends AbstractService { + protected static final Log LOG = LogFactory.getLog(NodeLabelsManager.class); + private static final int MAX_LABEL_LENGTH = 255; + public static final Set EMPTY_STRING_SET = Collections + .unmodifiableSet(new HashSet(0)); + public static final String ANY = 
"*"; + public static final Set ACCESS_ANY_LABEL_SET = ImmutableSet.of(ANY); + private static final Pattern LABEL_PATTERN = Pattern + .compile("^[0-9a-zA-Z][0-9a-zA-z-_]*"); + + /** + * If a user doesn't specify label of a queue or node, it belongs + * DEFAULT_LABEL + */ + static final String NO_LABEL = ""; + + protected Dispatcher dispatcher; + + private ConcurrentMap labelInfos = + new ConcurrentHashMap(); + private ConcurrentMap nodeInfos = + new ConcurrentHashMap(); + private ConcurrentMap queueInfos = + new ConcurrentHashMap(); + + private final ReadLock readLock; + private final WriteLock writeLock; + private AccessControlList adminAcl; + + protected NodeLabelsStore store; + + private static class LabelInfo { + Set nodes; + Set activeNodes; + Resource resource; + + LabelInfo() { + nodes = new HashSet(); + activeNodes = new HashSet(); + this.resource = Resource.newInstance(0, 0); + } + } + + private static class NodeInfo { + boolean running; + Set labels; + ConcurrentMap nms; + + NodeInfo() { + running = false; + labels = new HashSet(); + this.nms = new ConcurrentHashMap(); + } + } + + private static class QueueInfo { + Set labels; + Resource resource; + + QueueInfo() { + labels = new HashSet(); + resource = Resource.newInstance(0, 0); + } + } + + private enum UpdateLabelResourceType { + ACTIVE, DEACTIVE, UPDATE_LABEL + } + + private final class ForwardingEventHandler implements + EventHandler { + + @Override + public void handle(NodeLabelManagerEvent event) { + if (isInState(STATE.STARTED)) { + handleStoreEvent(event); + } + } + } + + // Dispatcher related code + protected void handleStoreEvent(NodeLabelManagerEvent event) { + try { + switch (event.getType()) { + case ADD_LABELS: + AddLabelsEvent addLabelsEvent = (AddLabelsEvent) event; + store.persistAddingLabels(addLabelsEvent.getLabels()); + break; + case REMOVE_LABELS: + RemoveLabelsEvent removeLabelsEvent = (RemoveLabelsEvent) event; + store.persistRemovingLabels(removeLabelsEvent.getLabels()); + break; + case STORE_NODE_TO_LABELS: + StoreNodeToLabelsEvent storeNodeToLabelsEvent = + (StoreNodeToLabelsEvent) event; + store.persistNodeToLabelsChanges(storeNodeToLabelsEvent + .getNodeToLabels()); + break; + } + } catch (IOException e) { + LOG.error("Failed to store label modification to storage"); + throw new YarnRuntimeException(e); + } + } + + public NodeLabelsManager() { + super(NodeLabelsManager.class.getName()); + ReentrantReadWriteLock lock = new ReentrantReadWriteLock(); + readLock = lock.readLock(); + writeLock = lock.writeLock(); + } + + // for UT purpose + protected void initDispatcher(Configuration conf) { + // create async handler + dispatcher = new AsyncDispatcher(); + AsyncDispatcher asyncDispatcher = (AsyncDispatcher) dispatcher; + asyncDispatcher.init(conf); + asyncDispatcher.setDrainEventsOnStop(); + } + + @Override + protected void serviceInit(Configuration conf) throws Exception { + adminAcl = + new AccessControlList(conf.get(YarnConfiguration.YARN_ADMIN_ACL, + YarnConfiguration.DEFAULT_YARN_ADMIN_ACL)); + + initNodeLabelStore(conf); + + labelInfos.put(NO_LABEL, new LabelInfo()); + } + + protected void initNodeLabelStore(Configuration conf) throws Exception { + this.store = new FileSystemNodeLabelsStore(this); + this.store.init(conf); + this.store.recover(); + } + + public boolean checkAccess(UserGroupInformation user) { + // make sure only admin can invoke + // this method + if (adminAcl.isUserAllowed(user)) { + return true; + } + return false; + } + + Map> getDefaultNodeToLabels(NodeLabelConfiguration conf) + 
throws IOException { + return conf.getNodeToLabels(); + } + + protected void addDefaultNodeToLabels( + Map> defaultNodeToLabels) throws IOException { + Set labels = new HashSet(); + for (Set t : defaultNodeToLabels.values()) { + labels.addAll(t); + } + addNodeLabels(labels); + + setLabelsOnMultipleNodes(defaultNodeToLabels); + } + + // for UT purpose + protected void startDispatcher() { + // start dispatcher + AsyncDispatcher asyncDispatcher = (AsyncDispatcher) dispatcher; + asyncDispatcher.start(); + } + + @Override + protected void serviceStart() throws Exception { + // init dispatcher only when service start, because recover will happen in + // service init, we don't want to trigger any event handling at that time. + initDispatcher(getConfig()); + + dispatcher.register(NodeLabelManagerEventType.class, + new ForwardingEventHandler()); + + startDispatcher(); + } + + /** + * Add a node label to repository + * + * @param label + * node label + */ + public void addNodeLabel(String label) throws IOException { + checkLabelName(label); + addNodeLabels(ImmutableSet.of(label)); + } + + /** + * Add multiple node labels to repository + * + * @param labels + * new node labels added + */ + @SuppressWarnings("unchecked") + public void addNodeLabels(Set labels) throws IOException { + if (null == labels || labels.isEmpty()) { + return; + } + + labels = normalizeLabels(labels); + + // do a check before actual adding them, will throw exception if any of them + // doesn't meet label name requirement + for (String label : labels) { + checkLabelName(label); + if (labelInfos.containsKey(label)) { + throw new IOException("label being add is already existed, label=" + + label); + } + } + + try { + writeLock.lock(); + for (String label : labels) { + this.labelInfos.put(label, new LabelInfo()); + } + if (null != dispatcher) { + dispatcher.getEventHandler().handle(new AddLabelsEvent(labels)); + } + + LOG.info("Add labels: [" + StringUtils.join(labels.iterator(), ",") + "]"); + } finally { + writeLock.unlock(); + } + } + + private void addNMInNodeAlreadyHasNM(Set labels, Resource newNMRes) { + try { + writeLock.lock(); + for (String label : labels) { + Resource originalRes = labelInfos.get(label).resource; + labelInfos.get(label).resource = Resources.add(newNMRes, originalRes); + } + for (QueueInfo q : queueInfos.values()) { + if (isNodeUsableByQueue(labels, q)) { + Resources.addTo(q.resource, newNMRes); + } + } + } finally { + writeLock.unlock(); + } + } + + /** + * Remove a node label from repository + * + * @param labelToRemove + * node label to remove + * @throws IOException + */ + public void removeNodeLabel(String labelToRemove) throws IOException { + removeNodeLabels(Arrays.asList(labelToRemove)); + } + + private void + removeNMToNodeAlreadyHasNM(Set labels, Resource newNMRes) { + try { + writeLock.lock(); + for (String label : labels) { + Resource originalRes = labelInfos.get(label).resource; + labelInfos.get(label).resource = + Resources.subtract(originalRes, newNMRes); + } + for (QueueInfo q : queueInfos.values()) { + if (isNodeUsableByQueue(labels, q)) { + Resources.subtractFrom(q.resource, newNMRes); + } + } + } finally { + writeLock.unlock(); + } + } + + /** + * Set label on node, if label is null or empty, it means remove label on node + * + * @param node + * @param labels + */ + public void removeLabelsOnNodes(String node, Set labels) + throws IOException { + setLabelsOnMultipleNodes(ImmutableMap.of(node, labels)); + } + + /** + * Remove multiple node labels from repository + * + * @param 
labelsToRemove + * node labels to remove + * @throws IOException + */ + @SuppressWarnings("unchecked") + public void removeNodeLabels(Collection labelsToRemove) + throws IOException { + if (null == labelsToRemove || labelsToRemove.isEmpty()) { + return; + } + + // Check if label to remove doesn't existed or null/empty, will throw + // exception if any of labels to remove doesn't meet requirement + for (String label : labelsToRemove) { + label = normalizeLabel(label); + if (label == null || label.isEmpty()) { + throw new IOException("Label to be removed is null or empty"); + } + + if (!labelInfos.containsKey(label)) { + throw new IOException( + "Label to be removed doesn't contain by NodeLabelManager" + + ", label=" + label); + } + + // check if any queue contains this label + for (Entry entry : queueInfos.entrySet()) { + String queueName = entry.getKey(); + Set queueLabels = entry.getValue().labels; + if (queueLabels.contains(label)) { + throw new IOException("Cannot remove label=" + label + + ", because queue=" + queueName + " is using this label. " + + "Please remove label on queue before remove the label"); + } + } + } + + try { + writeLock.lock(); + + Map> labelToActiveNodeAdded = + new HashMap>(); + Map> labelToActiveNodeRemoved = + new HashMap>(); + Map> originalNodeToLabels = + new HashMap>(); + + for (String label : labelsToRemove) { + label = normalizeLabel(label); + + // remove it from label + LabelInfo removedLabel = labelInfos.remove(label); + + // update node to labels + for (String node : removedLabel.nodes) { + NodeInfo n = nodeInfos.get(node); + if (!originalNodeToLabels.containsKey(node)) { + Set originalLabels = Sets.newHashSet(n.labels); + originalNodeToLabels.put(node, originalLabels); + } + nodeInfos.get(node).labels.remove(label); + // if we don't have any labels in a node now, we will mark this node + // as no label + if (n.running && n.labels.isEmpty()) { + add(labelToActiveNodeAdded, NO_LABEL, node); + } + } + } + + // update resource + updateLabelResource(labelToActiveNodeAdded, labelToActiveNodeRemoved, + originalNodeToLabels, UpdateLabelResourceType.UPDATE_LABEL); + + // create event to remove labels + if (null != dispatcher) { + dispatcher.getEventHandler().handle( + new RemoveLabelsEvent(labelsToRemove)); + } + + LOG.info("Remove labels: [" + + StringUtils.join(labelsToRemove.iterator(), ",") + "]"); + } finally { + writeLock.unlock(); + } + } + + /** + * Remove labels on given nodes + * + * @param nodes + * to remove labels + */ + public void removeLabelsOnNodes(Collection nodes) throws IOException { + Map> map = + new HashMap>(nodes.size()); + for (String node : nodes) { + map.put(node, EMPTY_STRING_SET); + } + setLabelsOnMultipleNodes(map); + } + + /** + * Remove label on given node + * + * @param node + * to remove label + */ + public void removeLabelOnNode(String node) throws IOException { + removeLabelsOnNodes(Arrays.asList(node)); + } + + private void updateLabelResource(Map> addLabelToNodes, + Map> removeLabelToNodes, + Map> originalNodeToLabels, + UpdateLabelResourceType updateType) { + try { + writeLock.lock(); + + // process add label to nodes + if (addLabelToNodes != null) { + for (Entry> entry : addLabelToNodes.entrySet()) { + String label = entry.getKey(); + Set nodes = entry.getValue(); + + // update label to active nodes + labelInfos.get(label).activeNodes.addAll(addLabelToNodes.get(label)); + + // update label to resource + Resource res = Resource.newInstance(0, 0); + for (String node : nodes) { + Resources.addTo(res, getResourceOfNode(node)); 
+ } + Resource originalRes = labelInfos.get(label).resource; + labelInfos.get(label).resource = Resources.add(res, originalRes); + } + } + + // process remove label to nodes + if (removeLabelToNodes != null) { + for (Entry> entry : removeLabelToNodes.entrySet()) { + String label = entry.getKey(); + Set nodes = entry.getValue(); + + // update label to active nodes + labelInfos.get(label).activeNodes.removeAll(nodes); + + // update label to resource + Resource res = Resource.newInstance(0, 0); + for (String node : nodes) { + Resources.addTo(res, getResourceOfNode(node)); + } + Resource originalRes = labelInfos.get(label).resource; + labelInfos.get(label).resource = Resources.subtract(originalRes, res); + } + } + + // update queue to resource + for (Entry> originEntry : originalNodeToLabels + .entrySet()) { + String node = originEntry.getKey(); + Set originLabels = originEntry.getValue(); + Set nowLabels = nodeInfos.get(node).labels; + + for (QueueInfo q : queueInfos.values()) { + Resource queueResource = q.resource; + boolean pastUsable = isNodeUsableByQueue(originLabels, q); + boolean nowUsable = isNodeUsableByQueue(nowLabels, q); + + if (updateType == UpdateLabelResourceType.UPDATE_LABEL) { + if (pastUsable && !nowUsable) { + Resources.subtractFrom(queueResource, getResourceOfNode(node)); + } else if (!pastUsable && nowUsable) { + Resources.addTo(queueResource, getResourceOfNode(node)); + } + } else if (updateType == UpdateLabelResourceType.ACTIVE) { + if (nowUsable) { + Resources.addTo(queueResource, getResourceOfNode(node)); + } + } else if (updateType == UpdateLabelResourceType.DEACTIVE) { + if (nowUsable) { + Resources.subtractFrom(queueResource, getResourceOfNode(node)); + } + } + } + } + } finally { + writeLock.unlock(); + } + } + + /** + * Set node -> label, if label is null or empty, it means remove label on node + * + * @param newNodeToLabels + * node -> label map + */ + @SuppressWarnings("unchecked") + public void + setLabelsOnMultipleNodes(Map> newNodeToLabels) + throws IOException { + if (null == newNodeToLabels || newNodeToLabels.isEmpty()) { + return; + } + + try { + writeLock.lock(); + + Map> labelToActiveNodeAdded = + new HashMap>(); + Map> labelToActiveNodeRemoved = + new HashMap>(); + Map> originalNodeToLabels = + new HashMap>(); + + for (Entry> e : newNodeToLabels.entrySet()) { + String node = e.getKey(); + nodeInfos.putIfAbsent(node, new NodeInfo()); + NodeInfo n = nodeInfos.get(node); + Set labels = e.getValue(); + + // normalize and verify + labels = normalizeLabels(labels); + for (String label : labels) { + verifyNodeLabel(node, label); + } + + // handling labels removed + Set difference = Sets.difference(n.labels, labels); + for (String removedLabel : difference) { + labelInfos.get(removedLabel).nodes.remove(node); + if (n.running) { + add(labelToActiveNodeRemoved, removedLabel, node); + } + } + + // Mark this node as "no-label" if we set a empty set of label + if (labels.isEmpty() && !n.labels.isEmpty() && n.running) { + add(labelToActiveNodeAdded, NO_LABEL, node); + } + + // handling labels added + for (String addedLabel : Sets.difference(labels, n.labels)) { + labelInfos.get(addedLabel).nodes.add(node); + if (n.running) { + add(labelToActiveNodeAdded, addedLabel, node); + } + } + + // Mark this node not "no-label" if we set a non-empty set of label + if (!labels.isEmpty() && n.labels.isEmpty() && n.running) { + add(labelToActiveNodeRemoved, NO_LABEL, node); + } + } + + // save original node to labels + for (String node : newNodeToLabels.keySet()) { + NodeInfo n = 
nodeInfos.get(node); + if (!originalNodeToLabels.containsKey(node)) { + Set originalLabels = Sets.newHashSet(n.labels); + originalNodeToLabels.put(node, originalLabels); + } + n.labels.clear(); + n.labels.addAll(newNodeToLabels.get(node)); + } + + updateLabelResource(labelToActiveNodeAdded, labelToActiveNodeRemoved, + originalNodeToLabels, UpdateLabelResourceType.UPDATE_LABEL); + + if (null != dispatcher) { + dispatcher.getEventHandler().handle( + new StoreNodeToLabelsEvent(newNodeToLabels)); + } + + // shows node->labels we added + LOG.info("setLabelsOnMultipleNodes:"); + for (Entry> entry : newNodeToLabels.entrySet()) { + LOG.info(" host=" + entry.getKey() + ", labels=[" + + StringUtils.join(entry.getValue().iterator(), ",") + "]"); + } + } finally { + writeLock.unlock(); + } + } + + public void setLabelsOnSingleNode(String node, Set labels) + throws IOException { + setLabelsOnMultipleNodes(ImmutableMap.of(node, labels)); + } + + /* + * Following methods are used for setting if a node is up and running, which + * will be used by this#getActiveNodesByLabel and getLabelResource + */ + public void activatedNode(NodeId node, Resource resource) { + try { + writeLock.lock(); + String nodeName = node.getHost(); + nodeInfos.putIfAbsent(nodeName, new NodeInfo()); + NodeInfo n = nodeInfos.get(nodeName); + + if (null != n.nms.put(node, resource)) { + String msg = + "This shouldn't happen, trying to active node," + + " but there's already a node here, " + + "please check what happened. NodeId=" + node.toString(); + LOG.warn(msg); + return; + } + + // add add it to running node + n.running = true; + + // update resources + Set labels = n.labels; + labels = + (labels == null || labels.isEmpty()) ? ImmutableSet.of(NO_LABEL) + : labels; + + if (n.nms.size() <= 1) { + Map> labelToActiveNodeAdded = + new HashMap>(); + for (String label : labels) { + labelToActiveNodeAdded.put(label, ImmutableSet.of(nodeName)); + } + Map> originalNodeTolabels = + new HashMap>(); + originalNodeTolabels.put(nodeName, n.labels); + updateLabelResource(labelToActiveNodeAdded, null, originalNodeTolabels, + UpdateLabelResourceType.ACTIVE); + } else { + // Support more than two NMs in a same node + addNMInNodeAlreadyHasNM(labels, resource); + } + } finally { + writeLock.unlock(); + } + } + + public void deactivateNode(NodeId node) { + try { + writeLock.lock(); + String nodeName = node.getHost(); + Resource res = null; + NodeInfo n = nodeInfos.get(nodeName); + if (null == n) { + return; + } + + // update resources + Set labels = n.labels; + labels = + labels == null || labels.isEmpty() ? ImmutableSet.of(NO_LABEL) + : labels; + + // this is last NM in this node + if (n.nms.size() == 1) { + Map> labelToActiveNodeRemoved = + new HashMap>(); + for (String label : labels) { + labelToActiveNodeRemoved.put(label, ImmutableSet.of(nodeName)); + labelInfos.get(label).activeNodes.remove(nodeName); + } + Map> originalNodeTolabels = + new HashMap>(); + originalNodeTolabels.put(nodeName, n.labels); + updateLabelResource(null, labelToActiveNodeRemoved, + originalNodeTolabels, UpdateLabelResourceType.DEACTIVE); + } + + // update node to resource + if (null == (res = n.nms.remove(node))) { + String msg = + "Trying to deactive node," + + " but there's doesn't exist a node here." + + " It may caused by re-registering a unhealthy node" + + " (make it become healthy). " + + "Please check what happened. 
NodeId=" + node.toString(); + LOG.warn(msg); + } + + // if there's more NM remains + if (n.nms.size() > 0) { + // Support more than two NMs in a same node + removeNMToNodeAlreadyHasNM(labels, res); + } else { + // We don't have more NMs in this node, set running to false + n.running = false; + } + } finally { + writeLock.unlock(); + } + } + + public void updateNodeResource(NodeId node, Resource newResource) { + deactivateNode(node); + activatedNode(node, newResource); + } + + /** + * Clear all labels and related mapping from NodeLabelManager + * + * @throws IOException + */ + public void clearAllLabels() throws IOException { + try { + writeLock.lock(); + Set dupLabels = Sets.newHashSet(getLabels()); + removeNodeLabels(dupLabels); + } finally { + writeLock.unlock(); + } + } + + public void reinitializeQueueLabels(Map> queueToLabels) { + try { + writeLock.lock(); + // clear before set + this.queueInfos.clear(); + + for (Entry> entry : queueToLabels.entrySet()) { + String queue = entry.getKey(); + QueueInfo q = new QueueInfo(); + this.queueInfos.put(queue, q); + + Set labels = entry.getValue(); + labels = labels.isEmpty() ? ImmutableSet.of(NO_LABEL) : labels; + if (labels.contains(ANY)) { + continue; + } + + q.labels.addAll(labels); + + // empty label node can be accessed by any queue + Set dupLabels = new HashSet(labels); + dupLabels.add(""); + Set accessedNodes = new HashSet(); + Resource totalResource = Resource.newInstance(0, 0); + for (String label : dupLabels) { + LabelInfo l = labelInfos.get(label); + for (String node : l.activeNodes) { + if (!accessedNodes.contains(node)) { + accessedNodes.add(node); + Resources.addTo(totalResource, getResourceOfNode(node)); + } + } + } + q.resource = totalResource; + } + } finally { + writeLock.unlock(); + } + } + + private Resource getResourceOfNode(String node) { + Resource res = Resource.newInstance(0, 0); + for (Resource r : nodeInfos.get(node).nms.values()) { + Resources.addTo(res, r); + } + return res; + } + + public Resource getResourceWithNoLabel() throws IOException { + return getResourceWithLabel(NO_LABEL); + } + + public Resource getResourceWithLabel(String label) { + label = normalizeLabel(label); + try { + readLock.lock(); + if (null == labelInfos.get(label)) { + return Resources.none(); + } + return labelInfos.get(label).resource; + } finally { + readLock.unlock(); + } + } + + /** + * Get nodes by given label + * + * @param label + * @return nodes has assigned give label label + */ + public Collection getActiveNodesByLabel(String label) { + label = normalizeLabel(label); + try { + readLock.lock(); + LabelInfo l = labelInfos.get(label); + if (null == l) { + return null; + } + return Collections.unmodifiableCollection(l.activeNodes); + } finally { + readLock.unlock(); + } + } + + /** + * Get number of active nodes by given label + * + * @param label + * @return Get number of active nodes by given label + */ + public int getNumOfActiveNodesByLabel(String label) { + label = normalizeLabel(label); + try { + readLock.lock(); + LabelInfo l = labelInfos.get(label); + if (null == l) { + return 0; + } + return l.activeNodes.size(); + } finally { + readLock.unlock(); + } + } + + /** + * Get mapping of nodes to labels + * + * @return nodes to labels map + */ + public Map> getNodesToLabels() { + try { + readLock.lock(); + Map> nodeToLabels = + new HashMap>(); + for (Entry entry : nodeInfos.entrySet()) { + String nodeName = entry.getKey(); + NodeInfo n = entry.getValue(); + if (n.labels.isEmpty()) { + continue; + } + nodeToLabels.put(nodeName, 
n.labels); + } + return Collections.unmodifiableMap(nodeToLabels); + } finally { + readLock.unlock(); + } + } + + public Set getLabelsOnNode(String node) { + if (nodeInfos.containsKey(node)) { + return nodeInfos.get(node).labels; + } + return EMPTY_STRING_SET; + } + + /** + * Get existing valid labels in repository + * + * @return existing valid labels in repository + */ + public Set getLabels() { + try { + readLock.lock(); + Set labels = new HashSet(labelInfos.keySet()); + labels.remove(NO_LABEL); + return Collections.unmodifiableSet(labels); + } finally { + readLock.unlock(); + } + } + + public Resource getQueueResource(String queueName, Set queueLabels, + Resource clusterResource) { + if (queueLabels.contains(ANY)) { + return clusterResource; + } + QueueInfo q = queueInfos.get(queueName); + if (null == q) { + return Resources.none(); + } + return q.resource; + } + + public boolean containsLabel(String label) { + try { + readLock.lock(); + return label != null + && (label.isEmpty() || labelInfos.containsKey(label)); + } finally { + readLock.unlock(); + } + } + + protected void checkLabelName(String label) throws IOException { + if (label == null || label.isEmpty() || label.length() > MAX_LABEL_LENGTH) { + throw new IOException("label added is empty or exceeds " + + MAX_LABEL_LENGTH + " character(s)"); + } + label = label.trim(); + + boolean match = LABEL_PATTERN.matcher(label).matches(); + + if (!match) { + throw new IOException("label name should only contains " + + "{0-9, a-z, A-Z, -, _} and should not started with {-,_}" + + ", now it is=" + label); + } + } + + protected String normalizeLabel(String label) { + if (label != null) { + return label.trim(); + } + return NO_LABEL; + } + + protected Set normalizeLabels(Set labels) { + Set newLabels = new HashSet(); + for (String label : labels) { + newLabels.add(normalizeLabel(label)); + } + return newLabels; + } + + private boolean isNodeUsableByQueue(Set nodeLabels, QueueInfo q) { + // node without any labels can be accessed by any queue + if (nodeLabels == null || nodeLabels.isEmpty() + || (nodeLabels.size() == 1 && nodeLabels.contains(NO_LABEL))) { + return true; + } + + for (String label : nodeLabels) { + if (q.labels.contains(label)) { + return true; + } + } + + return false; + } + + private void verifyNodeLabel(String node, String label) throws IOException { + if (node == null || node.isEmpty()) { + throw new IOException( + "Trying to change label on a node, but node is null or empty"); + } + if (label != null && !label.isEmpty() && !labelInfos.containsKey(label)) { + throw new IOException("Label doesn't exist in repository, " + + "have you added it before? 
label=" + label); + } + } + + private void add(Map> map, String key, String value) { + if (value == null) { + return; + } + if (!map.containsKey(key)) { + map.put(key, new HashSet()); + } + map.get(key).add(value); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/NodeLabelsStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/NodeLabelsStore.java new file mode 100644 index 0000000..9e1a9a1 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/NodeLabelsStore.java @@ -0,0 +1,53 @@ +package org.apache.hadoop.yarn.nodelabels; + +import java.io.IOException; +import java.util.Collection; +import java.util.Map; +import java.util.Set; + +import org.apache.hadoop.conf.Configuration; + +public abstract class NodeLabelsStore { + protected final NodeLabelsManager mgr; + protected Configuration conf; + + public NodeLabelsStore(NodeLabelsManager mgr) { + this.mgr = mgr; + } + + /** + * Store node -> label + */ + public abstract void persistNodeToLabelsChanges( + Map> nodeToLabels) throws IOException; + + /** + * Store new labels + */ + public abstract void persistAddingLabels(Set label) + throws IOException; + + /** + * Remove labels + */ + public abstract void persistRemovingLabels(Collection labels) + throws IOException; + + /** + * Recover labels and node to labels mappings from store + * @param conf + */ + public abstract void recover() throws IOException; + + public void init(Configuration conf) throws Exception { + this.conf = conf; + } + + public void finalize() throws Exception { + + } + + public NodeLabelsManager getNodeLabelsManager() { + return mgr; + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/event/AddLabelsEvent.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/event/AddLabelsEvent.java new file mode 100644 index 0000000..781d05f --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/event/AddLabelsEvent.java @@ -0,0 +1,34 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.nodelabels.event; + +import java.util.Set; + +public class AddLabelsEvent extends NodeLabelManagerEvent { + private Set labels; + + public AddLabelsEvent(Set labels) { + super(NodeLabelManagerEventType.ADD_LABELS); + this.labels = labels; + } + + public Set getLabels() { + return labels; + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/event/NodeLabelManagerEvent.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/event/NodeLabelManagerEvent.java new file mode 100644 index 0000000..d67fd7e --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/event/NodeLabelManagerEvent.java @@ -0,0 +1,28 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.nodelabels.event; + +import org.apache.hadoop.yarn.event.AbstractEvent; + +public class NodeLabelManagerEvent extends + AbstractEvent { + public NodeLabelManagerEvent(NodeLabelManagerEventType type) { + super(type); + } +} \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/event/NodeLabelManagerEventType.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/event/NodeLabelManagerEventType.java new file mode 100644 index 0000000..7053b19 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/event/NodeLabelManagerEventType.java @@ -0,0 +1,25 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.nodelabels.event; + +public enum NodeLabelManagerEventType { + REMOVE_LABELS, + ADD_LABELS, + STORE_NODE_TO_LABELS +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/event/RemoveLabelsEvent.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/event/RemoveLabelsEvent.java new file mode 100644 index 0000000..e5d9517 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/event/RemoveLabelsEvent.java @@ -0,0 +1,34 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.nodelabels.event; + +import java.util.Collection; + +public class RemoveLabelsEvent extends NodeLabelManagerEvent { + private Collection labels; + + public RemoveLabelsEvent(Collection labels) { + super(NodeLabelManagerEventType.REMOVE_LABELS); + this.labels = labels; + } + + public Collection getLabels() { + return labels; + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/event/StoreNodeToLabelsEvent.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/event/StoreNodeToLabelsEvent.java new file mode 100644 index 0000000..e3f955c --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/event/StoreNodeToLabelsEvent.java @@ -0,0 +1,35 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.nodelabels.event; + +import java.util.Map; +import java.util.Set; + +public class StoreNodeToLabelsEvent extends NodeLabelManagerEvent { + private Map> nodeToLabels; + + public StoreNodeToLabelsEvent(Map> nodeToLabels) { + super(NodeLabelManagerEventType.STORE_NODE_TO_LABELS); + this.nodeToLabels = nodeToLabels; + } + + public Map> getNodeToLabels() { + return nodeToLabels; + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/MemoryNodeLabelsManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/MemoryNodeLabelsManager.java new file mode 100644 index 0000000..cdd2ca5 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/MemoryNodeLabelsManager.java @@ -0,0 +1,70 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.nodelabels; + +import java.io.IOException; +import java.util.Collection; +import java.util.Map; +import java.util.Set; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.yarn.event.InlineDispatcher; + +public class MemoryNodeLabelsManager extends NodeLabelsManager { + Map> lastNodeToLabels = null; + Collection lastAddedlabels = null; + Collection lastRemovedlabels = null; + + @Override + public void initNodeLabelStore(Configuration conf) { + this.store = new NodeLabelsStore(this) { + + @Override + public void recover() throws IOException { + } + + @Override + public void persistRemovingLabels(Collection labels) + throws IOException { + lastRemovedlabels = labels; + } + + @Override + public void persistNodeToLabelsChanges(Map> nodeToLabels) + throws IOException { + lastNodeToLabels = nodeToLabels; + } + + @Override + public void persistAddingLabels(Set label) throws IOException { + lastAddedlabels = label; + } + }; + } + + @Override + protected void initDispatcher(Configuration conf) { + super.dispatcher = new InlineDispatcher(); + } + + @Override + protected void startDispatcher() { + // do nothing + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/NodeLabelTestBase.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/NodeLabelTestBase.java new file mode 100644 index 0000000..e50146f --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/NodeLabelTestBase.java @@ -0,0 +1,63 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.nodelabels; + +import java.util.Collection; +import java.util.Iterator; +import java.util.Map; +import java.util.Set; + +import org.junit.Assert; + +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.Sets; + +public class NodeLabelTestBase { + public static void assertMapEquals(Map> m1, + ImmutableMap> m2) { + Assert.assertEquals(m1.size(), m2.size()); + for (String k : m1.keySet()) { + Assert.assertTrue(m2.containsKey(k)); + assertCollectionEquals(m1.get(k), m2.get(k)); + } + } + + public static void assertMapContains(Map> m1, + ImmutableMap> m2) { + for (String k : m2.keySet()) { + Assert.assertTrue(m1.containsKey(k)); + assertCollectionEquals(m1.get(k), m2.get(k)); + } + } + + public static void assertCollectionEquals(Collection c1, + Collection c2) { + Assert.assertEquals(c1.size(), c2.size()); + Iterator i1 = c1.iterator(); + Iterator i2 = c2.iterator(); + while (i1.hasNext()) { + Assert.assertEquals(i1.next(), i2.next()); + } + } + + public static Set toSet(E... elements) { + Set set = Sets.newHashSet(elements); + return set; + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/TestFileSystemNodeLabelsStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/TestFileSystemNodeLabelsStore.java new file mode 100644 index 0000000..8339d64 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/TestFileSystemNodeLabelsStore.java @@ -0,0 +1,162 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.nodelabels; + +import java.io.File; +import java.io.IOException; +import java.util.Arrays; +import java.util.Map; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.event.InlineDispatcher; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +import com.google.common.collect.ImmutableMap; + +public class TestFileSystemNodeLabelsStore extends NodeLabelTestBase { + MockNodeLabelManager mgr = null; + Configuration conf = null; + + private static class MockNodeLabelManager extends + NodeLabelsManager { + @Override + protected void initDispatcher(Configuration conf) { + super.dispatcher = new InlineDispatcher(); + } + + @Override + protected void startDispatcher() { + // do nothing + } + } + + private FileSystemNodeLabelsStore getStore() { + return (FileSystemNodeLabelsStore) mgr.store; + } + + @Before + public void before() throws IOException { + mgr = new MockNodeLabelManager(); + conf = new Configuration(); + File tempDir = File.createTempFile("nlb", ".tmp"); + tempDir.delete(); + tempDir.mkdirs(); + tempDir.deleteOnExit(); + conf.set(YarnConfiguration.FS_NODE_LABELS_STORE_URI, + tempDir.getAbsolutePath()); + mgr.init(conf); + mgr.start(); + } + + @After + public void after() throws IOException { + getStore().fs.delete(getStore().rootDirPath, true); + mgr.stop(); + } + + @SuppressWarnings({ "unchecked", "rawtypes" }) + @Test(timeout = 10000) + public void testRecoverWithMirror() throws Exception { + mgr.addNodeLabels(toSet("p1", "p2", "p3")); + mgr.addNodeLabel("p4"); + mgr.addNodeLabels(toSet("p5", "p6")); + mgr.setLabelsOnMultipleNodes((Map) ImmutableMap.of("n1", toSet("p1"), "n2", + toSet("p2"))); + mgr.setLabelsOnMultipleNodes((Map) ImmutableMap.of("n3", toSet("p3"), "n4", + toSet("p4"), "n5", toSet("p5"), "n6", toSet("p6"), "n7", toSet("p6"))); + + /* + * node -> partition p1: n1 p2: n2 p3: n3 p4: n4 p5: n5 p6: n6, n7 + */ + + mgr.removeNodeLabel("p1"); + mgr.removeNodeLabels(Arrays.asList("p3", "p5")); + + /* + * After removed p2: n2 p4: n4 p6: n6, n7 + */ + // shutdown mgr and start a new mgr + mgr.stop(); + + mgr = new MockNodeLabelManager(); + mgr.init(conf); + + // check variables + Assert.assertEquals(3, mgr.getLabels().size()); + Assert.assertTrue(mgr.getLabels().containsAll( + Arrays.asList("p2", "p4", "p6"))); + + assertMapContains(mgr.getNodesToLabels(), ImmutableMap.of("n2", + toSet("p2"), "n4", toSet("p4"), "n6", toSet("p6"), "n7", toSet("p6"))); + + // stutdown mgr and start a new mgr + mgr.stop(); + mgr = new MockNodeLabelManager(); + mgr.init(conf); + + // check variables + Assert.assertEquals(3, mgr.getLabels().size()); + Assert.assertTrue(mgr.getLabels().containsAll( + Arrays.asList("p2", "p4", "p6"))); + + assertMapContains(mgr.getNodesToLabels(), ImmutableMap.of("n2", + toSet("p2"), "n4", toSet("p4"), "n6", toSet("p6"), "n7", toSet("p6"))); + } + + @SuppressWarnings({ "unchecked", "rawtypes" }) + @Test(timeout = 10000) + public void testEditlogRecover() throws Exception { + mgr.addNodeLabels(toSet("p1", "p2", "p3")); + mgr.addNodeLabel("p4"); + mgr.addNodeLabels(toSet("p5", "p6")); + mgr.setLabelsOnMultipleNodes((Map) ImmutableMap.of("n1", toSet("p1"), "n2", + toSet("p2"))); + mgr.setLabelsOnMultipleNodes((Map) ImmutableMap.of("n3", toSet("p3"), "n4", + toSet("p4"), "n5", toSet("p5"), "n6", toSet("p6"), "n7", toSet("p6"))); + + /* + * node -> partition p1: n1 p2: n2 p3: n3 p4: n4 p5: n5 p6: n6, n7 + 
*/
+
+    mgr.removeNodeLabel("p1");
+    mgr.removeNodeLabels(Arrays.asList("p3", "p5"));
+
+    /*
+     * After removal: p2: n2, p4: n4, p6: n6, n7
+     */
+    // shutdown mgr and start a new mgr
+    mgr.stop();
+
+    mgr = new MockNodeLabelManager();
+    mgr.init(conf);
+
+    // check variables
+    Assert.assertEquals(3, mgr.getLabels().size());
+    Assert.assertTrue(mgr.getLabels().containsAll(
+        Arrays.asList("p2", "p4", "p6")));
+
+    assertMapContains(mgr.getNodesToLabels(), ImmutableMap.of(
+        "n2", toSet("p2"), "n4", toSet("p4"), "n6", toSet("p6"), "n7",
+        toSet("p6")));
+  }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/TestNodeLabelUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/TestNodeLabelUtils.java
new file mode 100644
index 0000000..f432811
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/TestNodeLabelUtils.java
@@ -0,0 +1,106 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.nodelabels;
+
+import java.io.IOException;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.hadoop.yarn.nodelabels.NodeLabelUtils;
+import org.junit.Assert;
+import org.junit.Test;
+
+import com.google.common.collect.ImmutableMap;
+
+public class TestNodeLabelUtils extends NodeLabelTestBase {
+  private void assertParseShouldFail(String json, boolean shouldFail) {
+    try {
+      NodeLabelUtils.getNodeToLabelsFromJson(json);
+      if (shouldFail) {
+        Assert.fail("should fail:" + (json == null ? "" : json));
+      }
+    } catch (IOException e) {
+      if (!shouldFail) {
+        Assert.fail("shouldn't fail:" + (json == null ? "" : json));
"" : json); + } + } + } + + private void assertParseFailed(String json) { + assertParseShouldFail(json, true); + } + + @Test + public void testParseNodeToLabelsFromJson() throws IOException { + // empty and null + assertParseShouldFail(null, false); + assertParseShouldFail("", false); + + // empty host + String json = + "{\"host1\":{\"labels\":[\"x\",\"y\"]}, \"\":{\"labels\":[\"x\",\"y\"]}}"; + assertParseFailed(json); + + // not json object + json = + "[\"host1\":{\"labels\":[\"x\",\"y\"]}, \"\":{\"labels\":[\"x\",\"y\"]}]"; + assertParseFailed(json); + + // don't have labels + json = + "[\"host1\":{\"labels\":[\"x\",\"y\"]}, \"\":{\"tag\":[\"x\",\"y\"]}]"; + assertParseFailed(json); + + // labels is not array + json = "{\"host1\":{\"labels\":{\"x\":\"y\"}}}"; + assertParseFailed(json); + + // not a valid json + json = "[ }"; + assertParseFailed(json); + + // normal case #1 + json = + "{\"host1\":{\"labels\":[\"x\",\"y\"]}, \"host2\":{\"labels\":[\"x\",\"y\"]}}"; + Map> nodeToLabels = + NodeLabelUtils.getNodeToLabelsFromJson(json); + assertMapEquals(nodeToLabels, + ImmutableMap.of("host1", toSet("x", "y"), "host2", toSet("x", "y"))); + + // normal case #2 + json = + "{\"host1\":{\"labels\":[\"x\",\"y\"]}, \"host2\":{\"labels\":[\"a\",\"b\"]}}"; + nodeToLabels = NodeLabelUtils.getNodeToLabelsFromJson(json); + assertMapEquals(nodeToLabels, + ImmutableMap.of("host1", toSet("x", "y"), "host2", toSet("a", "b"))); + + // label is empty #1 + json = "{\"host1\":{\"labels\":[\"x\",\"y\"]}, \"host2\":{\"labels\":[]}}"; + nodeToLabels = NodeLabelUtils.getNodeToLabelsFromJson(json); + assertMapEquals(nodeToLabels, ImmutableMap.of("host1", toSet("x", "y"), + "host2", new HashSet())); + + // label is empty #2 + json = "{\"host1\":{\"labels\":[\"x\",\"y\"]}, \"host2\":{}}"; + nodeToLabels = NodeLabelUtils.getNodeToLabelsFromJson(json); + assertMapEquals(nodeToLabels, ImmutableMap.of("host1", toSet("x", "y"), + "host2", new HashSet())); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/TestNodeLabelsManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/TestNodeLabelsManager.java new file mode 100644 index 0000000..2a563b0 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/TestNodeLabelsManager.java @@ -0,0 +1,556 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.nodelabels; + +import java.io.IOException; +import java.util.Arrays; +import java.util.HashMap; +import java.util.Map; +import java.util.Set; + +import org.apache.commons.lang.StringUtils; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.yarn.api.records.NodeId; +import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.util.resource.Resources; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.Sets; + +public class TestNodeLabelsManager extends NodeLabelTestBase { + private final Resource EMPTY_RESOURCE = Resource.newInstance(0, 0); + private final Resource SMALL_NODE = Resource.newInstance(100, 0); + private final Resource LARGE_NODE = Resource.newInstance(1000, 0); + + MemoryNodeLabelsManager mgr = null; + + @Before + public void before() { + mgr = new MemoryNodeLabelsManager(); + mgr.init(new Configuration()); + mgr.start(); + } + + @After + public void after() { + mgr.stop(); + } + + @Test(timeout = 5000) + public void testAddRemovelabel() throws Exception { + // Add some label + mgr.addNodeLabel("hello"); + assertCollectionEquals(mgr.lastAddedlabels, Arrays.asList("hello")); + + mgr.addNodeLabel("world"); + mgr.addNodeLabels(toSet("hello1", "world1")); + assertCollectionEquals(mgr.lastAddedlabels, + Sets.newHashSet("hello1", "world1")); + + Assert.assertTrue(mgr.getLabels().containsAll( + Sets.newHashSet("hello", "world", "hello1", "world1"))); + + // try to remove null, empty and non-existed label, should fail + for (String p : Arrays.asList(null, NodeLabelsManager.NO_LABEL, "xx")) { + boolean caught = false; + try { + mgr.removeNodeLabel(p); + } catch (IOException e) { + caught = true; + } + Assert.assertTrue("remove label should fail " + + "when label is null/empty/non-existed", caught); + } + + // Remove some label + mgr.removeNodeLabel("hello"); + assertCollectionEquals(mgr.lastRemovedlabels, Arrays.asList("hello")); + Assert.assertTrue(mgr.getLabels().containsAll( + Arrays.asList("world", "hello1", "world1"))); + + mgr.removeNodeLabels(Arrays.asList("hello1", "world1", "world")); + Assert.assertTrue(mgr.lastRemovedlabels.containsAll(Sets.newHashSet( + "hello1", "world1", "world"))); + Assert.assertTrue(mgr.getLabels().isEmpty()); + } + + @Test(timeout = 5000) + public void testAddlabelWithCase() throws Exception { + // Add some label + mgr.addNodeLabel("HeLlO"); + assertCollectionEquals(mgr.lastAddedlabels, Arrays.asList("HeLlO")); + Assert.assertFalse(mgr.getLabels().containsAll(Arrays.asList("hello"))); + } + + @Test(timeout = 5000) + public void testAddInvalidlabel() throws IOException { + boolean caught = false; + try { + mgr.addNodeLabel(null); + } catch (IOException e) { + caught = true; + } + Assert.assertTrue("null label should not add to repo", caught); + + caught = false; + try { + mgr.addNodeLabel(NodeLabelsManager.NO_LABEL); + } catch (IOException e) { + caught = true; + } + + Assert.assertTrue("empty label should not add to repo", caught); + + caught = false; + try { + mgr.addNodeLabel("-?"); + } catch (IOException e) { + caught = true; + } + Assert.assertTrue("invalid label charactor should not add to repo", caught); + + caught = false; + try { + mgr.addNodeLabel(StringUtils.repeat("c", 257)); + } catch (IOException e) { + caught = true; + } + Assert.assertTrue("too long label should not add to repo", caught); + + caught = false; + try { + 
mgr.addNodeLabel("-aaabbb"); + } catch (IOException e) { + caught = true; + } + Assert.assertTrue("label cannot start with \"-\"", caught); + + caught = false; + try { + mgr.addNodeLabel("_aaabbb"); + } catch (IOException e) { + caught = true; + } + Assert.assertTrue("label cannot start with \"_\"", caught); + } + + @SuppressWarnings({ "unchecked", "rawtypes" }) + @Test(timeout = 5000) + public void testSetRemoveLabelsOnNodes() throws Exception { + // set a label on a node, but label doesn't exist + boolean caught = false; + try { + mgr.setLabelsOnSingleNode("node", toSet("label")); + } catch (IOException e) { + caught = true; + } + Assert.assertTrue("trying to set a label to a node but " + + "label doesn't exist in repository should fail", caught); + + // set a label on a node, but node is null or empty + try { + mgr.setLabelsOnSingleNode(NodeLabelsManager.NO_LABEL, toSet("label")); + } catch (IOException e) { + caught = true; + } + Assert.assertTrue("trying to add a empty node but succeeded", caught); + + // set node->label one by one + mgr.addNodeLabels(toSet("p1", "p2", "p3")); + mgr.setLabelsOnSingleNode("n1", toSet("p1")); + mgr.setLabelsOnSingleNode("n1", toSet("p2")); + mgr.setLabelsOnSingleNode("n2", toSet("p3")); + assertMapEquals(mgr.getNodesToLabels(), + ImmutableMap.of("n1", toSet("p2"), "n2", toSet("p3"))); + assertMapEquals(mgr.lastNodeToLabels, ImmutableMap.of("n2", toSet("p3"))); + + // set bunch of node->label + mgr.setLabelsOnMultipleNodes((Map) ImmutableMap.of("n3", toSet("p3"), "n1", + toSet("p1"))); + assertMapEquals(mgr.getNodesToLabels(), ImmutableMap.of("n1", toSet("p1"), + "n2", toSet("p3"), "n3", toSet("p3"))); + assertMapEquals(mgr.lastNodeToLabels, + ImmutableMap.of("n3", toSet("p3"), "n1", toSet("p1"))); + + // remove label on node + mgr.removeLabelOnNode("n1"); + assertMapEquals(mgr.getNodesToLabels(), + ImmutableMap.of("n2", toSet("p3"), "n3", toSet("p3"))); + assertMapEquals(mgr.lastNodeToLabels, + ImmutableMap.of("n1", NodeLabelsManager.EMPTY_STRING_SET)); + + // remove labels on node + mgr.removeLabelsOnNodes(Arrays.asList("n2", "n3")); + Assert.assertEquals(0, mgr.getNodesToLabels().size()); + assertMapEquals(mgr.lastNodeToLabels, ImmutableMap.of("n2", + NodeLabelsManager.EMPTY_STRING_SET, "n3", + NodeLabelsManager.EMPTY_STRING_SET)); + } + + @Test(timeout = 5000) + public void testRemovelabelWithNodes() throws Exception { + mgr.addNodeLabels(toSet("p1", "p2", "p3")); + mgr.setLabelsOnSingleNode("n1", toSet("p1")); + mgr.setLabelsOnSingleNode("n2", toSet("p2")); + mgr.setLabelsOnSingleNode("n3", toSet("p3")); + + mgr.removeNodeLabel("p1"); + assertMapEquals(mgr.getNodesToLabels(), + ImmutableMap.of("n2", toSet("p2"), "n3", toSet("p3"))); + assertCollectionEquals(mgr.lastRemovedlabels, Arrays.asList("p1")); + + mgr.removeNodeLabels(Arrays.asList("p2", "p3")); + Assert.assertTrue(mgr.getNodesToLabels().isEmpty()); + Assert.assertTrue(mgr.getLabels().isEmpty()); + assertCollectionEquals(mgr.lastRemovedlabels, Arrays.asList("p2", "p3")); + } + + @Test(timeout = 5000) + public void testNodeActiveDeactiveUpdate() throws Exception { + mgr.addNodeLabels(toSet("p1", "p2", "p3")); + mgr.setLabelsOnSingleNode("n1", toSet("p1")); + mgr.setLabelsOnSingleNode("n2", toSet("p2")); + mgr.setLabelsOnSingleNode("n3", toSet("p3")); + + Assert.assertEquals(mgr.getResourceWithLabel("p1"), EMPTY_RESOURCE); + Assert.assertEquals(mgr.getResourceWithLabel("p2"), EMPTY_RESOURCE); + Assert.assertEquals(mgr.getResourceWithLabel("p3"), EMPTY_RESOURCE); + 
Assert.assertEquals(mgr.getResourceWithLabel(NodeLabelsManager.NO_LABEL),
+        EMPTY_RESOURCE);
+
+    // activate two NMs on n1, one large and one small
+    mgr.activatedNode(NodeId.newInstance("n1", 0), SMALL_NODE);
+    mgr.activatedNode(NodeId.newInstance("n1", 1), LARGE_NODE);
+    Assert.assertEquals(mgr.getResourceWithLabel("p1"),
+        Resources.add(SMALL_NODE, LARGE_NODE));
+    Assert.assertEquals(mgr.getNumOfActiveNodesByLabel("p1"), 1);
+
+    // change the large NM to small, check if resource is updated
+    mgr.updateNodeResource(NodeId.newInstance("n1", 1), SMALL_NODE);
+    Assert.assertEquals(mgr.getResourceWithLabel("p1"),
+        Resources.multiply(SMALL_NODE, 2));
+    Assert.assertEquals(mgr.getNumOfActiveNodesByLabel("p1"), 1);
+
+    // deactivate one NM and check if resource is updated
+    mgr.deactivateNode(NodeId.newInstance("n1", 1));
+    Assert.assertEquals(mgr.getResourceWithLabel("p1"), SMALL_NODE);
+    Assert.assertEquals(mgr.getNumOfActiveNodesByLabel("p1"), 1);
+
+    // continue deactivating and check if resource is updated
+    mgr.deactivateNode(NodeId.newInstance("n1", 0));
+    Assert.assertEquals(mgr.getResourceWithLabel("p1"), EMPTY_RESOURCE);
+    Assert.assertEquals(mgr.getNumOfActiveNodesByLabel("p1"), 0);
+
+    // add the two NMs back to n1
+    mgr.activatedNode(NodeId.newInstance("n1", 0), SMALL_NODE);
+    mgr.activatedNode(NodeId.newInstance("n1", 1), LARGE_NODE);
+
+    // and remove p1; the two NMs should now fall back to the default label
+    mgr.removeNodeLabel("p1");
+    Assert.assertEquals(mgr.getResourceWithLabel(NodeLabelsManager.NO_LABEL),
+        Resources.add(SMALL_NODE, LARGE_NODE));
+  }
+
+  @SuppressWarnings({ "unchecked", "rawtypes" })
+  @Test(timeout = 5000)
+  public void testUpdateNodeLabelWithActiveNode() throws Exception {
+    mgr.addNodeLabels(toSet("p1", "p2", "p3"));
+    mgr.setLabelsOnSingleNode("n1", toSet("p1"));
+    mgr.setLabelsOnSingleNode("n2", toSet("p2"));
+    mgr.setLabelsOnSingleNode("n3", toSet("p3"));
+
+    // activate one small NM on each of n1, n2 and n3
+    mgr.activatedNode(NodeId.newInstance("n1", 0), SMALL_NODE);
+    mgr.activatedNode(NodeId.newInstance("n2", 0), SMALL_NODE);
+    mgr.activatedNode(NodeId.newInstance("n3", 0), SMALL_NODE);
+
+    // change label of n1 to p2
+    mgr.setLabelsOnSingleNode("n1", toSet("p2"));
+    Assert.assertEquals(mgr.getResourceWithLabel("p1"), EMPTY_RESOURCE);
+    Assert.assertEquals(mgr.getNumOfActiveNodesByLabel("p1"), 0);
+    Assert.assertEquals(mgr.getResourceWithLabel("p2"),
+        Resources.multiply(SMALL_NODE, 2));
+    Assert.assertEquals(mgr.getNumOfActiveNodesByLabel("p2"), 2);
+    Assert.assertEquals(mgr.getResourceWithLabel("p3"), SMALL_NODE);
+    Assert.assertEquals(mgr.getNumOfActiveNodesByLabel("p3"), 1);
+
+    // add more labels
+    mgr.addNodeLabels(toSet("p4", "p5", "p6"));
+    mgr.setLabelsOnMultipleNodes((Map) ImmutableMap.of("n4", toSet("p1"), "n5",
+        toSet("p2"), "n6", toSet("p3"), "n7", toSet("p4"), "n8", toSet("p5")));
+
+    // now node -> label is:
+    // p1 : n4
+    // p2 : n1, n2, n5
+    // p3 : n3, n6
+    // p4 : n7
+    // p5 : n8
+    // no-label : n9
+
+    // activate these nodes
+    mgr.activatedNode(NodeId.newInstance("n4", 0), SMALL_NODE);
+    mgr.activatedNode(NodeId.newInstance("n5", 0), SMALL_NODE);
+    mgr.activatedNode(NodeId.newInstance("n6", 0), SMALL_NODE);
+    mgr.activatedNode(NodeId.newInstance("n7", 0), SMALL_NODE);
+    mgr.activatedNode(NodeId.newInstance("n8", 0), SMALL_NODE);
+    mgr.activatedNode(NodeId.newInstance("n9", 0), SMALL_NODE);
+
+    // check variables
+    Assert.assertEquals(mgr.getResourceWithLabel("p1"), SMALL_NODE);
+    Assert.assertEquals(mgr.getNumOfActiveNodesByLabel("p1"), 1);
Assert.assertEquals(mgr.getResourceWithLabel("p2"), + Resources.multiply(SMALL_NODE, 3)); + Assert.assertEquals(mgr.getNumOfActiveNodesByLabel("p2"), 3); + Assert.assertEquals(mgr.getResourceWithLabel("p3"), + Resources.multiply(SMALL_NODE, 2)); + Assert.assertEquals(mgr.getNumOfActiveNodesByLabel("p3"), 2); + Assert.assertEquals(mgr.getResourceWithLabel("p4"), + Resources.multiply(SMALL_NODE, 1)); + Assert.assertEquals(mgr.getNumOfActiveNodesByLabel("p4"), 1); + Assert.assertEquals(mgr.getResourceWithLabel("p5"), + Resources.multiply(SMALL_NODE, 1)); + Assert.assertEquals(mgr.getNumOfActiveNodesByLabel("p5"), 1); + Assert.assertEquals(mgr.getResourceWithLabel(""), + Resources.multiply(SMALL_NODE, 1)); + Assert.assertEquals(mgr.getNumOfActiveNodesByLabel(""), 1); + + // change a bunch of nodes -> labels + // n4 -> p2 + // n7 -> empty + // n5 -> p1 + // n8 -> empty + // n9 -> p1 + // + // now become: + // p1 : n5, n9 + // p2 : n1, n2, n4 + // p3 : n3, n6 + // p4 : [ ] + // p5 : [ ] + // no label: n8, n7 + mgr.setLabelsOnMultipleNodes((Map) ImmutableMap.of("n4", toSet("p2"), "n7", + NodeLabelsManager.EMPTY_STRING_SET, "n5", toSet("p1"), "n8", + NodeLabelsManager.EMPTY_STRING_SET, "n9", toSet("p1"))); + + // check varibles + Assert.assertEquals(mgr.getResourceWithLabel("p1"), + Resources.multiply(SMALL_NODE, 2)); + Assert.assertEquals(mgr.getNumOfActiveNodesByLabel("p1"), 2); + Assert.assertEquals(mgr.getResourceWithLabel("p2"), + Resources.multiply(SMALL_NODE, 3)); + Assert.assertEquals(mgr.getNumOfActiveNodesByLabel("p2"), 3); + Assert.assertEquals(mgr.getResourceWithLabel("p3"), + Resources.multiply(SMALL_NODE, 2)); + Assert.assertEquals(mgr.getNumOfActiveNodesByLabel("p3"), 2); + Assert.assertEquals(mgr.getResourceWithLabel("p4"), + Resources.multiply(SMALL_NODE, 0)); + Assert.assertEquals(mgr.getNumOfActiveNodesByLabel("p4"), 0); + Assert.assertEquals(mgr.getResourceWithLabel("p5"), + Resources.multiply(SMALL_NODE, 0)); + Assert.assertEquals(mgr.getNumOfActiveNodesByLabel("p5"), 0); + Assert.assertEquals(mgr.getResourceWithLabel(""), + Resources.multiply(SMALL_NODE, 2)); + Assert.assertEquals(mgr.getNumOfActiveNodesByLabel(""), 2); + } + + @Test + public void testGetQueueResource() throws Exception { + Resource clusterResource = Resource.newInstance(9999, 1); + + /* + * Node->Labels: + * host1 : red, blue + * host2 : blue, yellow + * host3 : yellow + * host4 : + */ + mgr.addNodeLabels(toSet("red", "blue", "yellow")); + mgr.setLabelsOnSingleNode("host1", toSet("red", "blue")); + mgr.setLabelsOnSingleNode("host2", toSet("blue", "yellow")); + mgr.setLabelsOnSingleNode("host3", toSet("yellow")); + + // active two NM to n1, one large and one small + mgr.activatedNode(NodeId.newInstance("host1", 0), SMALL_NODE); + mgr.activatedNode(NodeId.newInstance("host2", 0), SMALL_NODE); + mgr.activatedNode(NodeId.newInstance("host3", 0), SMALL_NODE); + mgr.activatedNode(NodeId.newInstance("host4", 0), SMALL_NODE); + + // reinitialize queue + Set q1Label = toSet("red", "blue"); + Set q2Label = toSet("blue", "yellow"); + Set q3Label = toSet("yellow"); + Set q4Label = NodeLabelsManager.EMPTY_STRING_SET; + Set q5Label = toSet(NodeLabelsManager.ANY); + + Map> queueToLabels = new HashMap>(); + queueToLabels.put("Q1", q1Label); + queueToLabels.put("Q2", q2Label); + queueToLabels.put("Q3", q3Label); + queueToLabels.put("Q4", q4Label); + queueToLabels.put("Q5", q5Label); + + mgr.reinitializeQueueLabels(queueToLabels); + + // check resource + Assert.assertEquals(Resources.multiply(SMALL_NODE, 3), + 
mgr.getQueueResource("Q1", q1Label, clusterResource)); + Assert.assertEquals(Resources.multiply(SMALL_NODE, 4), + mgr.getQueueResource("Q2", q2Label, clusterResource)); + Assert.assertEquals(Resources.multiply(SMALL_NODE, 3), + mgr.getQueueResource("Q3", q3Label, clusterResource)); + Assert.assertEquals(Resources.multiply(SMALL_NODE, 1), + mgr.getQueueResource("Q4", q4Label, clusterResource)); + Assert.assertEquals(clusterResource, + mgr.getQueueResource("Q5", q5Label, clusterResource)); + + /* + * Check resource after changes some labels + * Node->Labels: + * host1 : blue + * host2 : + * host3 : red, yellow + * host4 : + */ + mgr.setLabelsOnMultipleNodes(ImmutableMap.of( + "host3", toSet("red", "yellow"), + "host1", toSet("blue"), + "host2", NodeLabelsManager.EMPTY_STRING_SET)); + + // check resource + Assert.assertEquals(Resources.multiply(SMALL_NODE, 4), + mgr.getQueueResource("Q1", q1Label, clusterResource)); + Assert.assertEquals(Resources.multiply(SMALL_NODE, 4), + mgr.getQueueResource("Q2", q2Label, clusterResource)); + Assert.assertEquals(Resources.multiply(SMALL_NODE, 3), + mgr.getQueueResource("Q3", q3Label, clusterResource)); + Assert.assertEquals(Resources.multiply(SMALL_NODE, 2), + mgr.getQueueResource("Q4", q4Label, clusterResource)); + Assert.assertEquals(clusterResource, + mgr.getQueueResource("Q5", q5Label, clusterResource)); + + /* + * Check resource after deactive/active some nodes + * Node->Labels: + * (deactived) host1 : blue + * host2 : + * (deactived and then actived) host3 : red, yellow + * host4 : + */ + mgr.deactivateNode(NodeId.newInstance("host1", 0)); + mgr.deactivateNode(NodeId.newInstance("host3", 0)); + mgr.activatedNode(NodeId.newInstance("host3", 0), SMALL_NODE); + + // check resource + Assert.assertEquals(Resources.multiply(SMALL_NODE, 3), + mgr.getQueueResource("Q1", q1Label, clusterResource)); + Assert.assertEquals(Resources.multiply(SMALL_NODE, 3), + mgr.getQueueResource("Q2", q2Label, clusterResource)); + Assert.assertEquals(Resources.multiply(SMALL_NODE, 3), + mgr.getQueueResource("Q3", q3Label, clusterResource)); + Assert.assertEquals(Resources.multiply(SMALL_NODE, 2), + mgr.getQueueResource("Q4", q4Label, clusterResource)); + Assert.assertEquals(clusterResource, + mgr.getQueueResource("Q5", q5Label, clusterResource)); + + /* + * Check resource after refresh queue: + * Q1: blue + * Q2: red, blue + * Q3: red + * Q4: + * Q5: ANY + */ + q1Label = toSet("blue"); + q2Label = toSet("blue", "red"); + q3Label = toSet("red"); + q4Label = NodeLabelsManager.EMPTY_STRING_SET; + q5Label = toSet(NodeLabelsManager.ANY); + + queueToLabels.clear(); + queueToLabels.put("Q1", q1Label); + queueToLabels.put("Q2", q2Label); + queueToLabels.put("Q3", q3Label); + queueToLabels.put("Q4", q4Label); + queueToLabels.put("Q5", q5Label); + + mgr.reinitializeQueueLabels(queueToLabels); + + // check resource + Assert.assertEquals(Resources.multiply(SMALL_NODE, 2), + mgr.getQueueResource("Q1", q1Label, clusterResource)); + Assert.assertEquals(Resources.multiply(SMALL_NODE, 3), + mgr.getQueueResource("Q2", q2Label, clusterResource)); + Assert.assertEquals(Resources.multiply(SMALL_NODE, 3), + mgr.getQueueResource("Q3", q3Label, clusterResource)); + Assert.assertEquals(Resources.multiply(SMALL_NODE, 2), + mgr.getQueueResource("Q4", q4Label, clusterResource)); + Assert.assertEquals(clusterResource, + mgr.getQueueResource("Q5", q5Label, clusterResource)); + + /* + * Active NMs in nodes already have NM + * Node->Labels: + * host2 : + * host3 : red, yellow (3 NMs) + * host4 : (2 NMs) + */ 
+ mgr.activatedNode(NodeId.newInstance("host3", 1), SMALL_NODE); + mgr.activatedNode(NodeId.newInstance("host3", 2), SMALL_NODE); + mgr.activatedNode(NodeId.newInstance("host4", 1), SMALL_NODE); + + // check resource + Assert.assertEquals(Resources.multiply(SMALL_NODE, 3), + mgr.getQueueResource("Q1", q1Label, clusterResource)); + Assert.assertEquals(Resources.multiply(SMALL_NODE, 6), + mgr.getQueueResource("Q2", q2Label, clusterResource)); + Assert.assertEquals(Resources.multiply(SMALL_NODE, 6), + mgr.getQueueResource("Q3", q3Label, clusterResource)); + Assert.assertEquals(Resources.multiply(SMALL_NODE, 3), + mgr.getQueueResource("Q4", q4Label, clusterResource)); + Assert.assertEquals(clusterResource, + mgr.getQueueResource("Q5", q5Label, clusterResource)); + + /* + * Deactive NMs in nodes already have NMs + * Node->Labels: + * host2 : + * host3 : red, yellow (2 NMs) + * host4 : (0 NMs) + */ + mgr.deactivateNode(NodeId.newInstance("host3", 2)); + mgr.deactivateNode(NodeId.newInstance("host4", 1)); + mgr.deactivateNode(NodeId.newInstance("host4", 0)); + + // check resource + Assert.assertEquals(Resources.multiply(SMALL_NODE, 1), + mgr.getQueueResource("Q1", q1Label, clusterResource)); + Assert.assertEquals(Resources.multiply(SMALL_NODE, 3), + mgr.getQueueResource("Q2", q2Label, clusterResource)); + Assert.assertEquals(Resources.multiply(SMALL_NODE, 3), + mgr.getQueueResource("Q3", q3Label, clusterResource)); + Assert.assertEquals(Resources.multiply(SMALL_NODE, 1), + mgr.getQueueResource("Q4", q4Label, clusterResource)); + Assert.assertEquals(clusterResource, + mgr.getQueueResource("Q5", q5Label, clusterResource)); + } +}
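The queue resources asserted in testGetQueueResource follow a single rule: a queue can use every active node whose labels intersect the queue's label set, plus every unlabeled node, and a queue labeled ANY sees the full cluster resource. The following standalone sketch illustrates that rule for the memory dimension only; the class and method names (QueueResourceRule, queueMemory) are hypothetical and are not part of this patch or of NodeLabelsManager.

import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

public class QueueResourceRule {
  static final String ANY = "*"; // assumed wildcard marker for illustration

  // Memory (MB) a queue can use, given per-host labels and per-host NM memory.
  static long queueMemory(Set<String> queueLabels,
      Map<String, Set<String>> hostToLabels,
      Map<String, Long> hostToMemory, long clusterMemory) {
    if (queueLabels.contains(ANY)) {
      return clusterMemory; // ANY: the queue sees the whole cluster
    }
    long sum = 0;
    for (Map.Entry<String, Long> e : hostToMemory.entrySet()) {
      Set<String> labels = hostToLabels.get(e.getKey());
      if (labels == null || labels.isEmpty()) {
        sum += e.getValue(); // unlabeled nodes are visible to every queue
        continue;
      }
      for (String l : labels) {
        if (queueLabels.contains(l)) {
          sum += e.getValue(); // labels intersect: node is visible to the queue
          break;
        }
      }
    }
    return sum;
  }

  public static void main(String[] args) {
    Map<String, Set<String>> hostToLabels = new HashMap<String, Set<String>>();
    hostToLabels.put("host1", new HashSet<String>(Arrays.asList("red", "blue")));
    hostToLabels.put("host2", new HashSet<String>(Arrays.asList("blue", "yellow")));
    hostToLabels.put("host3", new HashSet<String>(Arrays.asList("yellow")));
    hostToLabels.put("host4", new HashSet<String>());

    Map<String, Long> hostToMemory = new HashMap<String, Long>();
    for (String h : hostToLabels.keySet()) {
      hostToMemory.put(h, 100L); // one 100 MB NM per host, as in the test
    }

    // Q1 = {red, blue} sees host1, host2 and the unlabeled host4 -> 300
    System.out.println(queueMemory(
        new HashSet<String>(Arrays.asList("red", "blue")),
        hostToLabels, hostToMemory, 9999L));
    // Q4 = {} sees only the unlabeled host4 -> 100
    System.out.println(queueMemory(new HashSet<String>(),
        hostToLabels, hostToMemory, 9999L));
  }
}

With one 100 MB NM per host and the initial labels above, this prints 300 and 100, matching the 3x and 1x SMALL_NODE expectations for Q1 ({red, blue}) and Q4 (no labels) before any relabeling in the test.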