diff --git llap-server/bin/llap-daemon-env.sh llap-server/bin/llap-daemon-env.sh
new file mode 100644
index 0000000..40e6d9b
--- /dev/null
+++ llap-server/bin/llap-daemon-env.sh
@@ -0,0 +1,11 @@
+# Required
+#export LLAP_DAEMON_HOME=
+# TODO Remove this.
+#export LLAP_DAEMON_BIN_HOME=
+
+# Optional
+#export LLAP_DAEMON_LOG_DIR=
+#export LLAP_DAEMON_PID_DIR=
+#export LLAP_DAEMON_USER_CLASSPATH=
+#export LLAP_DAEMON_OPTS=
+#export LLAP_DAEMON_LOGGER=INFO,console
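For illustration, a filled-in llap-daemon-env.sh might look like the sketch below. The paths are placeholders invented for the example, not values from this patch; only LLAP_DAEMON_HOME (and, until the TODO above is resolved, LLAP_DAEMON_BIN_HOME) is required by the scripts that follow.

    # Hypothetical example configuration -- adjust paths for your installation
    export LLAP_DAEMON_HOME=/opt/llap-daemon        # directory containing the daemon jars and lib/
    export LLAP_DAEMON_BIN_HOME=/opt/llap-daemon    # directory containing bin/runLlapDaemon.sh
    export LLAP_DAEMON_LOG_DIR=/var/log/llap        # optional; defaults to /tmp/llapDaemonLogs
    export LLAP_DAEMON_PID_DIR=/var/run/llap        # optional; defaults to /tmp
    export LLAP_DAEMON_LOGGER=INFO,console          # optional; root logger for the daemon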
diff --git llap-server/bin/llapDaemon.sh llap-server/bin/llapDaemon.sh
new file mode 100755
index 0000000..afb6bc5
--- /dev/null
+++ llap-server/bin/llapDaemon.sh
@@ -0,0 +1,151 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# Runs the LLAP daemon as a background process.
+#
+# Environment Variables
+#
+#   LLAP_DAEMON_HOME - Directory with jars
+#   LLAP_DAEMON_BIN_HOME - Directory with binaries
+#   LLAP_DAEMON_CONF_DIR - Conf dir containing llap-daemon-site.xml
+#   LLAP_DAEMON_LOG_DIR - Log directory. Defaults to /tmp/llapDaemonLogs
+#   LLAP_DAEMON_PID_DIR - Directory where the pid file is stored. Defaults to /tmp
+#   LLAP_DAEMON_NICENESS - Scheduling priority for the daemon. Defaults to 0
+##
+
+#set -x
+
+usage="Usage: llap-daemon.sh (start|stop) "
+
+# if no args specified, show usage
+if [ $# -le 0 ]; then
+ echo $usage
+ exit 1
+fi
+
+# get arguments
+startStop=$1
+shift
+
+
+rotate_log ()
+{
+ log=$1;
+ num=5;
+ if [ -n "$2" ]; then
+ num=$2
+ fi
+ if [ -f "$log" ]; then # rotate logs
+ while [ $num -gt 1 ]; do
+ prev=`expr $num - 1`
+ [ -f "$log.$prev" ] && mv "$log.$prev" "$log.$num"
+ num=$prev
+ done
+ mv "$log" "$log.$num";
+ fi
+}
+
+if [ "${LLAP_DAEMON_CONF_DIR}" = "" ] ; then
+ echo "LLAP_DAEMON_CONF_DIR must be specified"
+ exit 1
+fi
+
+if [ -f "${LLAP_DAEMON_CONF_DIR}/llap-daemon-env.sh" ] ; then
+ . "${LLAP_DAEMON_CONF_DIR}/llap-daemon-env.sh"
+fi
+
+# get log directory
+if [ "$LLAP_DAEMON_LOG_DIR" = "" ]; then
+ export LLAP_DAEMON_LOG_DIR="/tmp/llapDaemonLogs"
+fi
+
+if [ ! -w "$LLAP_DAEMON_LOG_DIR" ] ; then
+ mkdir -p "$LLAP_DAEMON_LOG_DIR"
+ chown $USER $LLAP_DAEMON_LOG_DIR
+fi
+
+if [ "$LLAP_DAEMON_PID_DIR" = "" ]; then
+ LLAP_DAEMON_PID_DIR=/tmp
+fi
+
+# some variables
+LLAP_DAEMON_LOG_BASE=llap-daemon-$USER-$HOSTNAME
+export LLAP_DAEMON_LOG_FILE=$LLAP_DAEMON_LOG_BASE.log
+if [ ! -n "${LLAP_DAEMON_LOGGER}" ]; then
+ echo "LLAP_DAEMON_LOGGER not defined... using defaults"
+ LLAP_DAEMON_LOGGER=${LOG_LEVEL_DEFAULT}
+fi
+logLog=$LLAP_DAEMON_LOG_DIR/$LLAP_DAEMON_LOG_BASE.log
+logOut=$LLAP_DAEMON_LOG_DIR/$LLAP_DAEMON_LOG_BASE.out
+pid=$LLAP_DAEMON_PID_DIR/llap-daemon-$USER.pid
+LLAP_DAEMON_STOP_TIMEOUT=${LLAP_DAEMON_STOP_TIMEOUT:-2}
+
+# Set default scheduling priority
+if [ "$LLAP_DAEMON_NICENESS" = "" ]; then
+ export LLAP_DAEMON_NICENESS=0
+fi
+
+case $startStop in
+
+ (start)
+
+ [ -w "$LLAP_DAEMON_PID_DIR" ] || mkdir -p "$LLAP_DAEMON_PID_DIR"
+
+ if [ -f $pid ]; then
+ if kill -0 `cat $pid` > /dev/null 2>&1; then
+ echo llapdaemon running as process `cat $pid`. Stop it first.
+ exit 1
+ fi
+ fi
+
+ #rotate_log $logLog
+ #rotate_log $logOut
+ echo starting llapdaemon, logging to $logLog and $logOut
+ nohup nice -n $LLAP_DAEMON_NICENESS "$LLAP_DAEMON_BIN_HOME"/bin/runLlapDaemon.sh run > "$logOut" 2>&1 < /dev/null &
+ echo $! > $pid
+ ;;
+
+ (stop)
+
+ if [ -f $pid ]; then
+ TARGET_PID=`cat $pid`
+ if kill -0 $TARGET_PID > /dev/null 2>&1; then
+ echo stopping llapDaemon
+ kill $TARGET_PID
+ sleep $LLAP_DAEMON_STOP_TIMEOUT
+ if kill -0 $TARGET_PID > /dev/null 2>&1; then
+ echo "llapDaemon did not stop gracefully after $LLAP_DAEMON_STOP_TIMEOUT seconds: killing with kill -9"
+ kill -9 $TARGET_PID
+ fi
+ else
+ echo no llapDaemon to stop
+ fi
+ rm -f $pid
+ else
+ echo no llapDaemon to stop
+ fi
+ ;;
+
+ (*)
+ echo $usage
+ exit 1
+ ;;
+
+esac
+
+
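Taken together, a typical start/stop cycle with the script above would look roughly like the following sketch. The conf directory path is illustrative; LLAP_DAEMON_CONF_DIR must contain llap-daemon-site.xml, and an llap-daemon-env.sh there is optional and sourced if present.

    export LLAP_DAEMON_CONF_DIR=/etc/llap/conf   # illustrative path
    ./llapDaemon.sh start   # pid written to $LLAP_DAEMON_PID_DIR/llap-daemon-$USER.pid, output to $LLAP_DAEMON_LOG_DIR
    ./llapDaemon.sh stop    # SIGTERM first, then kill -9 after LLAP_DAEMON_STOP_TIMEOUT (default 2) seconds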
diff --git llap-server/bin/runLlapDaemon.sh llap-server/bin/runLlapDaemon.sh
new file mode 100755
index 0000000..88ecdb3
--- /dev/null
+++ llap-server/bin/runLlapDaemon.sh
@@ -0,0 +1,111 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# Environment Variables
+#   LLAP_DAEMON_HOME - Directory with jars
+#   LLAP_DAEMON_USER_CLASSPATH - Extra classpath entries, appended to the generated classpath
+#   LLAP_DAEMON_HEAPSIZE - Heap size in MB. Defaults to 4096
+#   LLAP_DAEMON_OPTS - Additional JVM options
+#   LLAP_DAEMON_LOGGER - Default is INFO,console
+#   LLAP_DAEMON_LOG_DIR - Defaults to /tmp/llapDaemonLogs
+#   LLAP_DAEMON_LOG_FILE - Log file name. Defaults to llapdaemon.log
+#   LLAP_DAEMON_CONF_DIR - Conf dir containing llap-daemon-site.xml
+
+function print_usage() {
+  echo "Usage: runLlapDaemon.sh [COMMAND]"
+ echo "Commands: "
+ echo " classpath print classpath"
+ echo " run run the daemon"
+}
+
+# if no args specified, show usage
+if [ $# = 0 ]; then
+ print_usage
+ exit 1
+fi
+
+# get arguments
+COMMAND=$1
+shift
+
+
+JAVA=$JAVA_HOME/bin/java
+LOG_LEVEL_DEFAULT="INFO,console"
+JAVA_OPTS_BASE="-server -Djava.net.preferIPv4Stack=true -XX:+UseNUMA -XX:+UseParallelGC -XX:+PrintGCDetails -verbose:gc -XX:+PrintGCTimeStamps"
+
+# CLASSPATH initially contains $HADOOP_CONF_DIR & $YARN_CONF_DIR
+if [ ! -d "$HADOOP_CONF_DIR" ]; then
+  echo "HADOOP_CONF_DIR is not set or is not a directory."
+  echo "Please specify it in the environment."
+ exit 1
+fi
+
+if [ ! -d "${LLAP_DAEMON_HOME}" ]; then
+  echo "LLAP_DAEMON_HOME is not set or is not a directory."
+  echo "Please specify it in the environment."
+ exit 1
+fi
+
+if [ ! -d "${LLAP_DAEMON_CONF_DIR}" ]; then
+  echo "LLAP_DAEMON_CONF_DIR is not set or is not a directory."
+  echo "Please specify it in the environment."
+ exit 1
+fi
+
+if [ ! -n "${LLAP_DAEMON_LOGGER}" ]; then
+ echo "LLAP_DAEMON_LOGGER not defined... using defaults"
+ LLAP_DAEMON_LOGGER=${LOG_LEVEL_DEFAULT}
+fi
+
+CLASSPATH=${LLAP_DAEMON_CONF_DIR}:${LLAP_DAEMON_HOME}/*:${LLAP_DAEMON_HOME}/lib/*:`${HADOOP_PREFIX}/bin/hadoop classpath`:.
+
+if [ -n "LLAP_DAEMON_USER_CLASSPATH" ]; then
+ CLASSPATH=${CLASSPATH}:${LLAP_DAEMON_USER_CLASSPATH}
+fi
+
+if [ ! -n "${LLAP_DAEMON_LOG_DIR}" ]; then
+ echo "LLAP_DAEMON_LOG_DIR not defined. Using default"
+ LLAP_DAEMON_LOG_DIR="/tmp/llapDaemonLogs"
+fi
+
+if [ "$LLAP_DAEMON_LOGFILE" = "" ]; then
+ LLAP_DAEMON_LOG_FILE='llapdaemon.log'
+fi
+
+if [ "$LLAP_DAEMON_HEAPSIZE" = "" ]; then
+ LLAP_DAEMON_HEAPSIZE=4096
+fi
+
+# Figure out classes based on the command
+
+if [ "$COMMAND" = "classpath" ] ; then
+ echo $CLASSPATH
+ exit
+elif [ "$COMMAND" = "run" ] ; then
+ CLASS='org.apache.hadoop.hive.llap.daemon.impl.LlapDaemon'
+fi
+
+LLAP_DAEMON_OPTS="${LLAP_DAEMON_OPTS} ${JAVA_OPTS_BASE}"
+LLAP_DAEMON_OPTS="${LLAP_DAEMON_OPTS} -Dlog4j.configuration=llap-daemon-log4j.properties"
+LLAP_DAEMON_OPTS="${LLAP_DAEMON_OPTS} -Dllap.daemon.log.dir=${LLAP_DAEMON_LOG_DIR}"
+LLAP_DAEMON_OPTS="${LLAP_DAEMON_OPTS} -Dllap.daemon.log.file=${LLAP_DAEMON_LOG_FILE}"
+LLAP_DAEMON_OPTS="${LLAP_DAEMON_OPTS} -Dllap.daemon.root.logger=${LLAP_DAEMON_LOGGER}"
+
+exec "$JAVA" -Dproc_llapdaemon -Xms${LLAP_DAEMON_HEAPSIZE}m -Xmx${LLAP_DAEMON_HEAPSIZE}m ${LLAP_DAEMON_OPTS} -classpath "$CLASSPATH" $CLASS "$@"
+
+
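With the defaults above and an otherwise empty environment, the exec at the end of runLlapDaemon.sh expands to roughly the command below. This is a sketch; the actual classpath is assembled from LLAP_DAEMON_CONF_DIR, LLAP_DAEMON_HOME and the output of the hadoop classpath command.

    $JAVA_HOME/bin/java -Dproc_llapdaemon -Xms4096m -Xmx4096m \
      -server -Djava.net.preferIPv4Stack=true -XX:+UseNUMA -XX:+UseParallelGC \
      -XX:+PrintGCDetails -verbose:gc -XX:+PrintGCTimeStamps \
      -Dlog4j.configuration=llap-daemon-log4j.properties \
      -Dllap.daemon.log.dir=/tmp/llapDaemonLogs -Dllap.daemon.log.file=llapdaemon.log \
      -Dllap.daemon.root.logger=INFO,console \
      -classpath "$CLASSPATH" org.apache.hadoop.hive.llap.daemon.impl.LlapDaemon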
diff --git llap-server/pom.xml llap-server/pom.xml
index 3cb24c7..27f465da 100644
--- llap-server/pom.xml
+++ llap-server/pom.xml
@@ -159,8 +159,59 @@
       <classifier>tests</classifier>
       <scope>test</scope>
+    <dependency>
+      <groupId>org.apache.tez</groupId>
+      <artifactId>tez-runtime-internals</artifactId>
+      <version>${tez.version}</version>
+      <optional>true</optional>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.tez</groupId>
+      <artifactId>tez-runtime-library</artifactId>
+      <version>${tez.version}</version>
+      <optional>true</optional>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.tez</groupId>
+      <artifactId>tez-dag</artifactId>
+      <version>${tez.version}</version>
+      <optional>true</optional>
+    </dependency>
+    <profile>
+      <id>protobuf</id>
+      <build>
+        <plugins>
+          <plugin>
+            <groupId>org.apache.maven.plugins</groupId>
+            <artifactId>maven-antrun-plugin</artifactId>
+            <executions>
+              <execution>
+                <id>generate-protobuf-sources</id>
+                <phase>generate-sources</phase>
+                <configuration>
+                  <target>
+                    <property name="protobuf.src.dir" location="${basedir}/src/protobuf"/>
+                    <property name="protobuf.build.dir" location="${basedir}/src/gen/protobuf/gen-java"/>
+                    <echo>Building LLAP-Server Protobuf</echo>
+                    <mkdir dir="${protobuf.build.dir}"/>
+                    <exec executable="protoc" failonerror="true">
+                      <arg value="--java_out=${protobuf.build.dir}"/>
+                      <arg value="-I=${protobuf.src.dir}"/>
+                      <arg value="${protobuf.src.dir}/LlapDaemonProtocol.proto"/>
+                    </exec>
+                  </target>
+                </configuration>
+                <goals>
+                  <goal>run</goal>
+                </goals>
+              </execution>
+            </executions>
+          </plugin>
+        </plugins>
+      </build>
+    </profile>
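The generated sources in the next file can also be re-created by invoking protoc by hand rather than through the profile. A rough equivalent is sketched below, assuming the .proto file lives under llap-server/src/protobuf (the usual Hive layout; the output directory matches the generated file's path in this patch).

    cd llap-server
    mkdir -p src/gen/protobuf/gen-java
    protoc --java_out=src/gen/protobuf/gen-java -I=src/protobuf src/protobuf/LlapDaemonProtocol.proto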
diff --git llap-server/src/gen/protobuf/gen-java/org/apache/hadoop/hive/llap/daemon/rpc/LlapDaemonProtocolProtos.java llap-server/src/gen/protobuf/gen-java/org/apache/hadoop/hive/llap/daemon/rpc/LlapDaemonProtocolProtos.java
new file mode 100644
index 0000000..66122c1
--- /dev/null
+++ llap-server/src/gen/protobuf/gen-java/org/apache/hadoop/hive/llap/daemon/rpc/LlapDaemonProtocolProtos.java
@@ -0,0 +1,2082 @@
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+// source: LlapDaemonProtocol.proto
+
+package org.apache.hadoop.hive.llap.daemon.rpc;
+
+public final class LlapDaemonProtocolProtos {
+ private LlapDaemonProtocolProtos() {}
+ public static void registerAllExtensions(
+ com.google.protobuf.ExtensionRegistry registry) {
+ }
+ public interface RunContainerRequestProtoOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // optional string container_id_string = 1;
+ /**
+ * optional string container_id_string = 1;
+ */
+ boolean hasContainerIdString();
+ /**
+ * optional string container_id_string = 1;
+ */
+ java.lang.String getContainerIdString();
+ /**
+ * optional string container_id_string = 1;
+ */
+ com.google.protobuf.ByteString
+ getContainerIdStringBytes();
+
+ // optional string am_host = 2;
+ /**
+ * optional string am_host = 2;
+ */
+ boolean hasAmHost();
+ /**
+ * optional string am_host = 2;
+ */
+ java.lang.String getAmHost();
+ /**
+ * optional string am_host = 2;
+ */
+ com.google.protobuf.ByteString
+ getAmHostBytes();
+
+ // optional int32 am_port = 3;
+ /**
+ * optional int32 am_port = 3;
+ */
+ boolean hasAmPort();
+ /**
+ * optional int32 am_port = 3;
+ */
+ int getAmPort();
+
+ // optional string token_identifier = 4;
+ /**
+ * optional string token_identifier = 4;
+ */
+ boolean hasTokenIdentifier();
+ /**
+ * optional string token_identifier = 4;
+ */
+ java.lang.String getTokenIdentifier();
+ /**
+ * optional string token_identifier = 4;
+ */
+ com.google.protobuf.ByteString
+ getTokenIdentifierBytes();
+
+ // optional bytes credentials_binary = 5;
+ /**
+ * optional bytes credentials_binary = 5;
+ */
+ boolean hasCredentialsBinary();
+ /**
+ * optional bytes credentials_binary = 5;
+ */
+ com.google.protobuf.ByteString getCredentialsBinary();
+
+ // optional string user = 6;
+ /**
+ * optional string user = 6;
+ */
+ boolean hasUser();
+ /**
+ * optional string user = 6;
+ */
+ java.lang.String getUser();
+ /**
+ * optional string user = 6;
+ */
+ com.google.protobuf.ByteString
+ getUserBytes();
+
+ // optional string application_id_string = 7;
+ /**
+ * optional string application_id_string = 7;
+ */
+ boolean hasApplicationIdString();
+ /**
+ * optional string application_id_string = 7;
+ */
+ java.lang.String getApplicationIdString();
+ /**
+ * optional string application_id_string = 7;
+ */
+ com.google.protobuf.ByteString
+ getApplicationIdStringBytes();
+
+ // optional int32 app_attempt_number = 8;
+ /**
+ * optional int32 app_attempt_number = 8;
+ */
+ boolean hasAppAttemptNumber();
+ /**
+ * optional int32 app_attempt_number = 8;
+ */
+ int getAppAttemptNumber();
+ }
+ /**
+ * Protobuf type {@code RunContainerRequestProto}
+ */
+ public static final class RunContainerRequestProto extends
+ com.google.protobuf.GeneratedMessage
+ implements RunContainerRequestProtoOrBuilder {
+ // Use RunContainerRequestProto.newBuilder() to construct.
+    private RunContainerRequestProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private RunContainerRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final RunContainerRequestProto defaultInstance;
+ public static RunContainerRequestProto getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public RunContainerRequestProto getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private RunContainerRequestProto(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 10: {
+ bitField0_ |= 0x00000001;
+ containerIdString_ = input.readBytes();
+ break;
+ }
+ case 18: {
+ bitField0_ |= 0x00000002;
+ amHost_ = input.readBytes();
+ break;
+ }
+ case 24: {
+ bitField0_ |= 0x00000004;
+ amPort_ = input.readInt32();
+ break;
+ }
+ case 34: {
+ bitField0_ |= 0x00000008;
+ tokenIdentifier_ = input.readBytes();
+ break;
+ }
+ case 42: {
+ bitField0_ |= 0x00000010;
+ credentialsBinary_ = input.readBytes();
+ break;
+ }
+ case 50: {
+ bitField0_ |= 0x00000020;
+ user_ = input.readBytes();
+ break;
+ }
+ case 58: {
+ bitField0_ |= 0x00000040;
+ applicationIdString_ = input.readBytes();
+ break;
+ }
+ case 64: {
+ bitField0_ |= 0x00000080;
+ appAttemptNumber_ = input.readInt32();
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_RunContainerRequestProto_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_RunContainerRequestProto_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerRequestProto.class, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerRequestProto.Builder.class);
+ }
+
+    public static com.google.protobuf.Parser<RunContainerRequestProto> PARSER =
+        new com.google.protobuf.AbstractParser<RunContainerRequestProto>() {
+ public RunContainerRequestProto parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new RunContainerRequestProto(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+    public com.google.protobuf.Parser<RunContainerRequestProto> getParserForType() {
+ return PARSER;
+ }
+
+ private int bitField0_;
+ // optional string container_id_string = 1;
+ public static final int CONTAINER_ID_STRING_FIELD_NUMBER = 1;
+ private java.lang.Object containerIdString_;
+ /**
+ * optional string container_id_string = 1;
+ */
+ public boolean hasContainerIdString() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * optional string container_id_string = 1;
+ */
+ public java.lang.String getContainerIdString() {
+ java.lang.Object ref = containerIdString_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
+ containerIdString_ = s;
+ }
+ return s;
+ }
+ }
+ /**
+ * optional string container_id_string = 1;
+ */
+ public com.google.protobuf.ByteString
+ getContainerIdStringBytes() {
+ java.lang.Object ref = containerIdString_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ containerIdString_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ // optional string am_host = 2;
+ public static final int AM_HOST_FIELD_NUMBER = 2;
+ private java.lang.Object amHost_;
+ /**
+ * optional string am_host = 2;
+ */
+ public boolean hasAmHost() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * optional string am_host = 2;
+ */
+ public java.lang.String getAmHost() {
+ java.lang.Object ref = amHost_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
+ amHost_ = s;
+ }
+ return s;
+ }
+ }
+ /**
+ * optional string am_host = 2;
+ */
+ public com.google.protobuf.ByteString
+ getAmHostBytes() {
+ java.lang.Object ref = amHost_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ amHost_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ // optional int32 am_port = 3;
+ public static final int AM_PORT_FIELD_NUMBER = 3;
+ private int amPort_;
+ /**
+ * optional int32 am_port = 3;
+ */
+ public boolean hasAmPort() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ /**
+ * optional int32 am_port = 3;
+ */
+ public int getAmPort() {
+ return amPort_;
+ }
+
+ // optional string token_identifier = 4;
+ public static final int TOKEN_IDENTIFIER_FIELD_NUMBER = 4;
+ private java.lang.Object tokenIdentifier_;
+ /**
+ * optional string token_identifier = 4;
+ */
+ public boolean hasTokenIdentifier() {
+ return ((bitField0_ & 0x00000008) == 0x00000008);
+ }
+ /**
+ * optional string token_identifier = 4;
+ */
+ public java.lang.String getTokenIdentifier() {
+ java.lang.Object ref = tokenIdentifier_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
+ tokenIdentifier_ = s;
+ }
+ return s;
+ }
+ }
+ /**
+ * optional string token_identifier = 4;
+ */
+ public com.google.protobuf.ByteString
+ getTokenIdentifierBytes() {
+ java.lang.Object ref = tokenIdentifier_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ tokenIdentifier_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ // optional bytes credentials_binary = 5;
+ public static final int CREDENTIALS_BINARY_FIELD_NUMBER = 5;
+ private com.google.protobuf.ByteString credentialsBinary_;
+ /**
+ * optional bytes credentials_binary = 5;
+ */
+ public boolean hasCredentialsBinary() {
+ return ((bitField0_ & 0x00000010) == 0x00000010);
+ }
+ /**
+ * optional bytes credentials_binary = 5;
+ */
+ public com.google.protobuf.ByteString getCredentialsBinary() {
+ return credentialsBinary_;
+ }
+
+ // optional string user = 6;
+ public static final int USER_FIELD_NUMBER = 6;
+ private java.lang.Object user_;
+ /**
+ * optional string user = 6;
+ */
+ public boolean hasUser() {
+ return ((bitField0_ & 0x00000020) == 0x00000020);
+ }
+ /**
+ * optional string user = 6;
+ */
+ public java.lang.String getUser() {
+ java.lang.Object ref = user_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
+ user_ = s;
+ }
+ return s;
+ }
+ }
+ /**
+ * optional string user = 6;
+ */
+ public com.google.protobuf.ByteString
+ getUserBytes() {
+ java.lang.Object ref = user_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ user_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ // optional string application_id_string = 7;
+ public static final int APPLICATION_ID_STRING_FIELD_NUMBER = 7;
+ private java.lang.Object applicationIdString_;
+ /**
+ * optional string application_id_string = 7;
+ */
+ public boolean hasApplicationIdString() {
+ return ((bitField0_ & 0x00000040) == 0x00000040);
+ }
+ /**
+ * optional string application_id_string = 7;
+ */
+ public java.lang.String getApplicationIdString() {
+ java.lang.Object ref = applicationIdString_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
+ applicationIdString_ = s;
+ }
+ return s;
+ }
+ }
+ /**
+ * optional string application_id_string = 7;
+ */
+ public com.google.protobuf.ByteString
+ getApplicationIdStringBytes() {
+ java.lang.Object ref = applicationIdString_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ applicationIdString_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ // optional int32 app_attempt_number = 8;
+ public static final int APP_ATTEMPT_NUMBER_FIELD_NUMBER = 8;
+ private int appAttemptNumber_;
+ /**
+ * optional int32 app_attempt_number = 8;
+ */
+ public boolean hasAppAttemptNumber() {
+ return ((bitField0_ & 0x00000080) == 0x00000080);
+ }
+ /**
+ * optional int32 app_attempt_number = 8;
+ */
+ public int getAppAttemptNumber() {
+ return appAttemptNumber_;
+ }
+
+ private void initFields() {
+ containerIdString_ = "";
+ amHost_ = "";
+ amPort_ = 0;
+ tokenIdentifier_ = "";
+ credentialsBinary_ = com.google.protobuf.ByteString.EMPTY;
+ user_ = "";
+ applicationIdString_ = "";
+ appAttemptNumber_ = 0;
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeBytes(1, getContainerIdStringBytes());
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeBytes(2, getAmHostBytes());
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ output.writeInt32(3, amPort_);
+ }
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
+ output.writeBytes(4, getTokenIdentifierBytes());
+ }
+ if (((bitField0_ & 0x00000010) == 0x00000010)) {
+ output.writeBytes(5, credentialsBinary_);
+ }
+ if (((bitField0_ & 0x00000020) == 0x00000020)) {
+ output.writeBytes(6, getUserBytes());
+ }
+ if (((bitField0_ & 0x00000040) == 0x00000040)) {
+ output.writeBytes(7, getApplicationIdStringBytes());
+ }
+ if (((bitField0_ & 0x00000080) == 0x00000080)) {
+ output.writeInt32(8, appAttemptNumber_);
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(1, getContainerIdStringBytes());
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(2, getAmHostBytes());
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeInt32Size(3, amPort_);
+ }
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(4, getTokenIdentifierBytes());
+ }
+ if (((bitField0_ & 0x00000010) == 0x00000010)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(5, credentialsBinary_);
+ }
+ if (((bitField0_ & 0x00000020) == 0x00000020)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(6, getUserBytes());
+ }
+ if (((bitField0_ & 0x00000040) == 0x00000040)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(7, getApplicationIdStringBytes());
+ }
+ if (((bitField0_ & 0x00000080) == 0x00000080)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeInt32Size(8, appAttemptNumber_);
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerRequestProto)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerRequestProto other = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerRequestProto) obj;
+
+ boolean result = true;
+ result = result && (hasContainerIdString() == other.hasContainerIdString());
+ if (hasContainerIdString()) {
+ result = result && getContainerIdString()
+ .equals(other.getContainerIdString());
+ }
+ result = result && (hasAmHost() == other.hasAmHost());
+ if (hasAmHost()) {
+ result = result && getAmHost()
+ .equals(other.getAmHost());
+ }
+ result = result && (hasAmPort() == other.hasAmPort());
+ if (hasAmPort()) {
+ result = result && (getAmPort()
+ == other.getAmPort());
+ }
+ result = result && (hasTokenIdentifier() == other.hasTokenIdentifier());
+ if (hasTokenIdentifier()) {
+ result = result && getTokenIdentifier()
+ .equals(other.getTokenIdentifier());
+ }
+ result = result && (hasCredentialsBinary() == other.hasCredentialsBinary());
+ if (hasCredentialsBinary()) {
+ result = result && getCredentialsBinary()
+ .equals(other.getCredentialsBinary());
+ }
+ result = result && (hasUser() == other.hasUser());
+ if (hasUser()) {
+ result = result && getUser()
+ .equals(other.getUser());
+ }
+ result = result && (hasApplicationIdString() == other.hasApplicationIdString());
+ if (hasApplicationIdString()) {
+ result = result && getApplicationIdString()
+ .equals(other.getApplicationIdString());
+ }
+ result = result && (hasAppAttemptNumber() == other.hasAppAttemptNumber());
+ if (hasAppAttemptNumber()) {
+ result = result && (getAppAttemptNumber()
+ == other.getAppAttemptNumber());
+ }
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ private int memoizedHashCode = 0;
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (hasContainerIdString()) {
+ hash = (37 * hash) + CONTAINER_ID_STRING_FIELD_NUMBER;
+ hash = (53 * hash) + getContainerIdString().hashCode();
+ }
+ if (hasAmHost()) {
+ hash = (37 * hash) + AM_HOST_FIELD_NUMBER;
+ hash = (53 * hash) + getAmHost().hashCode();
+ }
+ if (hasAmPort()) {
+ hash = (37 * hash) + AM_PORT_FIELD_NUMBER;
+ hash = (53 * hash) + getAmPort();
+ }
+ if (hasTokenIdentifier()) {
+ hash = (37 * hash) + TOKEN_IDENTIFIER_FIELD_NUMBER;
+ hash = (53 * hash) + getTokenIdentifier().hashCode();
+ }
+ if (hasCredentialsBinary()) {
+ hash = (37 * hash) + CREDENTIALS_BINARY_FIELD_NUMBER;
+ hash = (53 * hash) + getCredentialsBinary().hashCode();
+ }
+ if (hasUser()) {
+ hash = (37 * hash) + USER_FIELD_NUMBER;
+ hash = (53 * hash) + getUser().hashCode();
+ }
+ if (hasApplicationIdString()) {
+ hash = (37 * hash) + APPLICATION_ID_STRING_FIELD_NUMBER;
+ hash = (53 * hash) + getApplicationIdString().hashCode();
+ }
+ if (hasAppAttemptNumber()) {
+ hash = (37 * hash) + APP_ATTEMPT_NUMBER_FIELD_NUMBER;
+ hash = (53 * hash) + getAppAttemptNumber();
+ }
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerRequestProto parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerRequestProto parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerRequestProto parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerRequestProto parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerRequestProto parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerRequestProto parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerRequestProto parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerRequestProto parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerRequestProto parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerRequestProto parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerRequestProto prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code RunContainerRequestProto}
+ */
+ public static final class Builder extends
+        com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerRequestProtoOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_RunContainerRequestProto_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_RunContainerRequestProto_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerRequestProto.class, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerRequestProto.Builder.class);
+ }
+
+ // Construct using org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerRequestProto.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ containerIdString_ = "";
+ bitField0_ = (bitField0_ & ~0x00000001);
+ amHost_ = "";
+ bitField0_ = (bitField0_ & ~0x00000002);
+ amPort_ = 0;
+ bitField0_ = (bitField0_ & ~0x00000004);
+ tokenIdentifier_ = "";
+ bitField0_ = (bitField0_ & ~0x00000008);
+ credentialsBinary_ = com.google.protobuf.ByteString.EMPTY;
+ bitField0_ = (bitField0_ & ~0x00000010);
+ user_ = "";
+ bitField0_ = (bitField0_ & ~0x00000020);
+ applicationIdString_ = "";
+ bitField0_ = (bitField0_ & ~0x00000040);
+ appAttemptNumber_ = 0;
+ bitField0_ = (bitField0_ & ~0x00000080);
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_RunContainerRequestProto_descriptor;
+ }
+
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerRequestProto getDefaultInstanceForType() {
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerRequestProto.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerRequestProto build() {
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerRequestProto result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerRequestProto buildPartial() {
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerRequestProto result = new org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerRequestProto(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ result.containerIdString_ = containerIdString_;
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+ to_bitField0_ |= 0x00000002;
+ }
+ result.amHost_ = amHost_;
+ if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
+ to_bitField0_ |= 0x00000004;
+ }
+ result.amPort_ = amPort_;
+ if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
+ to_bitField0_ |= 0x00000008;
+ }
+ result.tokenIdentifier_ = tokenIdentifier_;
+ if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
+ to_bitField0_ |= 0x00000010;
+ }
+ result.credentialsBinary_ = credentialsBinary_;
+ if (((from_bitField0_ & 0x00000020) == 0x00000020)) {
+ to_bitField0_ |= 0x00000020;
+ }
+ result.user_ = user_;
+ if (((from_bitField0_ & 0x00000040) == 0x00000040)) {
+ to_bitField0_ |= 0x00000040;
+ }
+ result.applicationIdString_ = applicationIdString_;
+ if (((from_bitField0_ & 0x00000080) == 0x00000080)) {
+ to_bitField0_ |= 0x00000080;
+ }
+ result.appAttemptNumber_ = appAttemptNumber_;
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerRequestProto) {
+ return mergeFrom((org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerRequestProto)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerRequestProto other) {
+ if (other == org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerRequestProto.getDefaultInstance()) return this;
+ if (other.hasContainerIdString()) {
+ bitField0_ |= 0x00000001;
+ containerIdString_ = other.containerIdString_;
+ onChanged();
+ }
+ if (other.hasAmHost()) {
+ bitField0_ |= 0x00000002;
+ amHost_ = other.amHost_;
+ onChanged();
+ }
+ if (other.hasAmPort()) {
+ setAmPort(other.getAmPort());
+ }
+ if (other.hasTokenIdentifier()) {
+ bitField0_ |= 0x00000008;
+ tokenIdentifier_ = other.tokenIdentifier_;
+ onChanged();
+ }
+ if (other.hasCredentialsBinary()) {
+ setCredentialsBinary(other.getCredentialsBinary());
+ }
+ if (other.hasUser()) {
+ bitField0_ |= 0x00000020;
+ user_ = other.user_;
+ onChanged();
+ }
+ if (other.hasApplicationIdString()) {
+ bitField0_ |= 0x00000040;
+ applicationIdString_ = other.applicationIdString_;
+ onChanged();
+ }
+ if (other.hasAppAttemptNumber()) {
+ setAppAttemptNumber(other.getAppAttemptNumber());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerRequestProto parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerRequestProto) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ private int bitField0_;
+
+ // optional string container_id_string = 1;
+ private java.lang.Object containerIdString_ = "";
+ /**
+ * optional string container_id_string = 1;
+ */
+ public boolean hasContainerIdString() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * optional string container_id_string = 1;
+ */
+ public java.lang.String getContainerIdString() {
+ java.lang.Object ref = containerIdString_;
+ if (!(ref instanceof java.lang.String)) {
+ java.lang.String s = ((com.google.protobuf.ByteString) ref)
+ .toStringUtf8();
+ containerIdString_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * optional string container_id_string = 1;
+ */
+ public com.google.protobuf.ByteString
+ getContainerIdStringBytes() {
+ java.lang.Object ref = containerIdString_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ containerIdString_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * optional string container_id_string = 1;
+ */
+ public Builder setContainerIdString(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ containerIdString_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * optional string container_id_string = 1;
+ */
+ public Builder clearContainerIdString() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ containerIdString_ = getDefaultInstance().getContainerIdString();
+ onChanged();
+ return this;
+ }
+ /**
+ * optional string container_id_string = 1;
+ */
+ public Builder setContainerIdStringBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ containerIdString_ = value;
+ onChanged();
+ return this;
+ }
+
+ // optional string am_host = 2;
+ private java.lang.Object amHost_ = "";
+ /**
+ * optional string am_host = 2;
+ */
+ public boolean hasAmHost() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * optional string am_host = 2;
+ */
+ public java.lang.String getAmHost() {
+ java.lang.Object ref = amHost_;
+ if (!(ref instanceof java.lang.String)) {
+ java.lang.String s = ((com.google.protobuf.ByteString) ref)
+ .toStringUtf8();
+ amHost_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * optional string am_host = 2;
+ */
+ public com.google.protobuf.ByteString
+ getAmHostBytes() {
+ java.lang.Object ref = amHost_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ amHost_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * optional string am_host = 2;
+ */
+ public Builder setAmHost(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000002;
+ amHost_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * optional string am_host = 2;
+ */
+ public Builder clearAmHost() {
+ bitField0_ = (bitField0_ & ~0x00000002);
+ amHost_ = getDefaultInstance().getAmHost();
+ onChanged();
+ return this;
+ }
+ /**
+ * optional string am_host = 2;
+ */
+ public Builder setAmHostBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000002;
+ amHost_ = value;
+ onChanged();
+ return this;
+ }
+
+ // optional int32 am_port = 3;
+ private int amPort_ ;
+ /**
+ * optional int32 am_port = 3;
+ */
+ public boolean hasAmPort() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ /**
+ * optional int32 am_port = 3;
+ */
+ public int getAmPort() {
+ return amPort_;
+ }
+ /**
+ * optional int32 am_port = 3;
+ */
+ public Builder setAmPort(int value) {
+ bitField0_ |= 0x00000004;
+ amPort_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * optional int32 am_port = 3;
+ */
+ public Builder clearAmPort() {
+ bitField0_ = (bitField0_ & ~0x00000004);
+ amPort_ = 0;
+ onChanged();
+ return this;
+ }
+
+ // optional string token_identifier = 4;
+ private java.lang.Object tokenIdentifier_ = "";
+ /**
+ * optional string token_identifier = 4;
+ */
+ public boolean hasTokenIdentifier() {
+ return ((bitField0_ & 0x00000008) == 0x00000008);
+ }
+ /**
+ * optional string token_identifier = 4;
+ */
+ public java.lang.String getTokenIdentifier() {
+ java.lang.Object ref = tokenIdentifier_;
+ if (!(ref instanceof java.lang.String)) {
+ java.lang.String s = ((com.google.protobuf.ByteString) ref)
+ .toStringUtf8();
+ tokenIdentifier_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * optional string token_identifier = 4;
+ */
+ public com.google.protobuf.ByteString
+ getTokenIdentifierBytes() {
+ java.lang.Object ref = tokenIdentifier_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ tokenIdentifier_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * optional string token_identifier = 4;
+ */
+ public Builder setTokenIdentifier(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000008;
+ tokenIdentifier_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * optional string token_identifier = 4;
+ */
+ public Builder clearTokenIdentifier() {
+ bitField0_ = (bitField0_ & ~0x00000008);
+ tokenIdentifier_ = getDefaultInstance().getTokenIdentifier();
+ onChanged();
+ return this;
+ }
+ /**
+ * optional string token_identifier = 4;
+ */
+ public Builder setTokenIdentifierBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000008;
+ tokenIdentifier_ = value;
+ onChanged();
+ return this;
+ }
+
+ // optional bytes credentials_binary = 5;
+ private com.google.protobuf.ByteString credentialsBinary_ = com.google.protobuf.ByteString.EMPTY;
+ /**
+ * optional bytes credentials_binary = 5;
+ */
+ public boolean hasCredentialsBinary() {
+ return ((bitField0_ & 0x00000010) == 0x00000010);
+ }
+ /**
+ * optional bytes credentials_binary = 5;
+ */
+ public com.google.protobuf.ByteString getCredentialsBinary() {
+ return credentialsBinary_;
+ }
+ /**
+ * optional bytes credentials_binary = 5;
+ */
+ public Builder setCredentialsBinary(com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000010;
+ credentialsBinary_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * optional bytes credentials_binary = 5;
+ */
+ public Builder clearCredentialsBinary() {
+ bitField0_ = (bitField0_ & ~0x00000010);
+ credentialsBinary_ = getDefaultInstance().getCredentialsBinary();
+ onChanged();
+ return this;
+ }
+
+ // optional string user = 6;
+ private java.lang.Object user_ = "";
+ /**
+ * optional string user = 6;
+ */
+ public boolean hasUser() {
+ return ((bitField0_ & 0x00000020) == 0x00000020);
+ }
+ /**
+ * optional string user = 6;
+ */
+ public java.lang.String getUser() {
+ java.lang.Object ref = user_;
+ if (!(ref instanceof java.lang.String)) {
+ java.lang.String s = ((com.google.protobuf.ByteString) ref)
+ .toStringUtf8();
+ user_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * optional string user = 6;
+ */
+ public com.google.protobuf.ByteString
+ getUserBytes() {
+ java.lang.Object ref = user_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ user_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * optional string user = 6;
+ */
+ public Builder setUser(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000020;
+ user_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * optional string user = 6;
+ */
+ public Builder clearUser() {
+ bitField0_ = (bitField0_ & ~0x00000020);
+ user_ = getDefaultInstance().getUser();
+ onChanged();
+ return this;
+ }
+ /**
+ * optional string user = 6;
+ */
+ public Builder setUserBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000020;
+ user_ = value;
+ onChanged();
+ return this;
+ }
+
+ // optional string application_id_string = 7;
+ private java.lang.Object applicationIdString_ = "";
+ /**
+ * optional string application_id_string = 7;
+ */
+ public boolean hasApplicationIdString() {
+ return ((bitField0_ & 0x00000040) == 0x00000040);
+ }
+ /**
+ * optional string application_id_string = 7;
+ */
+ public java.lang.String getApplicationIdString() {
+ java.lang.Object ref = applicationIdString_;
+ if (!(ref instanceof java.lang.String)) {
+ java.lang.String s = ((com.google.protobuf.ByteString) ref)
+ .toStringUtf8();
+ applicationIdString_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * optional string application_id_string = 7;
+ */
+ public com.google.protobuf.ByteString
+ getApplicationIdStringBytes() {
+ java.lang.Object ref = applicationIdString_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ applicationIdString_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * optional string application_id_string = 7;
+ */
+ public Builder setApplicationIdString(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000040;
+ applicationIdString_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * optional string application_id_string = 7;
+ */
+ public Builder clearApplicationIdString() {
+ bitField0_ = (bitField0_ & ~0x00000040);
+ applicationIdString_ = getDefaultInstance().getApplicationIdString();
+ onChanged();
+ return this;
+ }
+ /**
+ * optional string application_id_string = 7;
+ */
+ public Builder setApplicationIdStringBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000040;
+ applicationIdString_ = value;
+ onChanged();
+ return this;
+ }
+
+ // optional int32 app_attempt_number = 8;
+ private int appAttemptNumber_ ;
+ /**
+ * optional int32 app_attempt_number = 8;
+ */
+ public boolean hasAppAttemptNumber() {
+ return ((bitField0_ & 0x00000080) == 0x00000080);
+ }
+ /**
+ * optional int32 app_attempt_number = 8;
+ */
+ public int getAppAttemptNumber() {
+ return appAttemptNumber_;
+ }
+ /**
+ * optional int32 app_attempt_number = 8;
+ */
+ public Builder setAppAttemptNumber(int value) {
+ bitField0_ |= 0x00000080;
+ appAttemptNumber_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * optional int32 app_attempt_number = 8;
+ */
+ public Builder clearAppAttemptNumber() {
+ bitField0_ = (bitField0_ & ~0x00000080);
+ appAttemptNumber_ = 0;
+ onChanged();
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:RunContainerRequestProto)
+ }
+
+ static {
+ defaultInstance = new RunContainerRequestProto(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:RunContainerRequestProto)
+ }
+
+ public interface RunContainerResponseProtoOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+ }
+ /**
+ * Protobuf type {@code RunContainerResponseProto}
+ */
+ public static final class RunContainerResponseProto extends
+ com.google.protobuf.GeneratedMessage
+ implements RunContainerResponseProtoOrBuilder {
+ // Use RunContainerResponseProto.newBuilder() to construct.
+    private RunContainerResponseProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private RunContainerResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final RunContainerResponseProto defaultInstance;
+ public static RunContainerResponseProto getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public RunContainerResponseProto getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private RunContainerResponseProto(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_RunContainerResponseProto_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_RunContainerResponseProto_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerResponseProto.class, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerResponseProto.Builder.class);
+ }
+
+    public static com.google.protobuf.Parser<RunContainerResponseProto> PARSER =
+        new com.google.protobuf.AbstractParser<RunContainerResponseProto>() {
+ public RunContainerResponseProto parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new RunContainerResponseProto(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+    public com.google.protobuf.Parser<RunContainerResponseProto> getParserForType() {
+ return PARSER;
+ }
+
+ private void initFields() {
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerResponseProto)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerResponseProto other = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerResponseProto) obj;
+
+ boolean result = true;
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ private int memoizedHashCode = 0;
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerResponseProto parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerResponseProto parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerResponseProto parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerResponseProto parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerResponseProto parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerResponseProto parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerResponseProto parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerResponseProto parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerResponseProto parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerResponseProto parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerResponseProto prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code RunContainerResponseProto}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerResponseProtoOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_RunContainerResponseProto_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_RunContainerResponseProto_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerResponseProto.class, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerResponseProto.Builder.class);
+ }
+
+ // Construct using org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerResponseProto.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_RunContainerResponseProto_descriptor;
+ }
+
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerResponseProto getDefaultInstanceForType() {
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerResponseProto.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerResponseProto build() {
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerResponseProto result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerResponseProto buildPartial() {
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerResponseProto result = new org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerResponseProto(this);
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerResponseProto) {
+ return mergeFrom((org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerResponseProto)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerResponseProto other) {
+ if (other == org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerResponseProto.getDefaultInstance()) return this;
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerResponseProto parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerResponseProto) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:RunContainerResponseProto)
+ }
+
+ static {
+ defaultInstance = new RunContainerResponseProto(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:RunContainerResponseProto)
+ }
+
+ /**
+ * Protobuf service {@code LlapDaemonProtocol}
+ */
+ public static abstract class LlapDaemonProtocol
+ implements com.google.protobuf.Service {
+ protected LlapDaemonProtocol() {}
+
+ public interface Interface {
+ /**
+ * rpc runContainer(.RunContainerRequestProto) returns (.RunContainerResponseProto);
+ */
+ public abstract void runContainer(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerRequestProto request,
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerResponseProto> done);
+
+ }
+
+ public static com.google.protobuf.Service newReflectiveService(
+ final Interface impl) {
+ return new LlapDaemonProtocol() {
+ @java.lang.Override
+ public void runContainer(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerRequestProto request,
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerResponseProto> done) {
+ impl.runContainer(controller, request, done);
+ }
+
+ };
+ }
+
+ public static com.google.protobuf.BlockingService
+ newReflectiveBlockingService(final BlockingInterface impl) {
+ return new com.google.protobuf.BlockingService() {
+ public final com.google.protobuf.Descriptors.ServiceDescriptor
+ getDescriptorForType() {
+ return getDescriptor();
+ }
+
+ public final com.google.protobuf.Message callBlockingMethod(
+ com.google.protobuf.Descriptors.MethodDescriptor method,
+ com.google.protobuf.RpcController controller,
+ com.google.protobuf.Message request)
+ throws com.google.protobuf.ServiceException {
+ if (method.getService() != getDescriptor()) {
+ throw new java.lang.IllegalArgumentException(
+ "Service.callBlockingMethod() given method descriptor for " +
+ "wrong service type.");
+ }
+ switch(method.getIndex()) {
+ case 0:
+ return impl.runContainer(controller, (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerRequestProto)request);
+ default:
+ throw new java.lang.AssertionError("Can't get here.");
+ }
+ }
+
+ public final com.google.protobuf.Message
+ getRequestPrototype(
+ com.google.protobuf.Descriptors.MethodDescriptor method) {
+ if (method.getService() != getDescriptor()) {
+ throw new java.lang.IllegalArgumentException(
+ "Service.getRequestPrototype() given method " +
+ "descriptor for wrong service type.");
+ }
+ switch(method.getIndex()) {
+ case 0:
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerRequestProto.getDefaultInstance();
+ default:
+ throw new java.lang.AssertionError("Can't get here.");
+ }
+ }
+
+ public final com.google.protobuf.Message
+ getResponsePrototype(
+ com.google.protobuf.Descriptors.MethodDescriptor method) {
+ if (method.getService() != getDescriptor()) {
+ throw new java.lang.IllegalArgumentException(
+ "Service.getResponsePrototype() given method " +
+ "descriptor for wrong service type.");
+ }
+ switch(method.getIndex()) {
+ case 0:
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerResponseProto.getDefaultInstance();
+ default:
+ throw new java.lang.AssertionError("Can't get here.");
+ }
+ }
+
+ };
+ }
+
+ /**
+ * rpc runContainer(.RunContainerRequestProto) returns (.RunContainerResponseProto);
+ */
+ public abstract void runContainer(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerRequestProto request,
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerResponseProto> done);
+
+ public static final
+ com.google.protobuf.Descriptors.ServiceDescriptor
+ getDescriptor() {
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.getDescriptor().getServices().get(0);
+ }
+ public final com.google.protobuf.Descriptors.ServiceDescriptor
+ getDescriptorForType() {
+ return getDescriptor();
+ }
+
+ public final void callMethod(
+ com.google.protobuf.Descriptors.MethodDescriptor method,
+ com.google.protobuf.RpcController controller,
+ com.google.protobuf.Message request,
+ com.google.protobuf.RpcCallback<
+ com.google.protobuf.Message> done) {
+ if (method.getService() != getDescriptor()) {
+ throw new java.lang.IllegalArgumentException(
+ "Service.callMethod() given method descriptor for wrong " +
+ "service type.");
+ }
+ switch(method.getIndex()) {
+ case 0:
+ this.runContainer(controller, (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerRequestProto)request,
+ com.google.protobuf.RpcUtil.specializeCallback(
+ done));
+ return;
+ default:
+ throw new java.lang.AssertionError("Can't get here.");
+ }
+ }
+
+ public final com.google.protobuf.Message
+ getRequestPrototype(
+ com.google.protobuf.Descriptors.MethodDescriptor method) {
+ if (method.getService() != getDescriptor()) {
+ throw new java.lang.IllegalArgumentException(
+ "Service.getRequestPrototype() given method " +
+ "descriptor for wrong service type.");
+ }
+ switch(method.getIndex()) {
+ case 0:
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerRequestProto.getDefaultInstance();
+ default:
+ throw new java.lang.AssertionError("Can't get here.");
+ }
+ }
+
+ public final com.google.protobuf.Message
+ getResponsePrototype(
+ com.google.protobuf.Descriptors.MethodDescriptor method) {
+ if (method.getService() != getDescriptor()) {
+ throw new java.lang.IllegalArgumentException(
+ "Service.getResponsePrototype() given method " +
+ "descriptor for wrong service type.");
+ }
+ switch(method.getIndex()) {
+ case 0:
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerResponseProto.getDefaultInstance();
+ default:
+ throw new java.lang.AssertionError("Can't get here.");
+ }
+ }
+
+ public static Stub newStub(
+ com.google.protobuf.RpcChannel channel) {
+ return new Stub(channel);
+ }
+
+ public static final class Stub extends org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.LlapDaemonProtocol implements Interface {
+ private Stub(com.google.protobuf.RpcChannel channel) {
+ this.channel = channel;
+ }
+
+ private final com.google.protobuf.RpcChannel channel;
+
+ public com.google.protobuf.RpcChannel getChannel() {
+ return channel;
+ }
+
+ public void runContainer(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerRequestProto request,
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerResponseProto> done) {
+ channel.callMethod(
+ getDescriptor().getMethods().get(0),
+ controller,
+ request,
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerResponseProto.getDefaultInstance(),
+ com.google.protobuf.RpcUtil.generalizeCallback(
+ done,
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerResponseProto.class,
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerResponseProto.getDefaultInstance()));
+ }
+ }
+
+ public static BlockingInterface newBlockingStub(
+ com.google.protobuf.BlockingRpcChannel channel) {
+ return new BlockingStub(channel);
+ }
+
+ public interface BlockingInterface {
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerResponseProto runContainer(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerRequestProto request)
+ throws com.google.protobuf.ServiceException;
+ }
+
+ private static final class BlockingStub implements BlockingInterface {
+ private BlockingStub(com.google.protobuf.BlockingRpcChannel channel) {
+ this.channel = channel;
+ }
+
+ private final com.google.protobuf.BlockingRpcChannel channel;
+
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerResponseProto runContainer(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerRequestProto request)
+ throws com.google.protobuf.ServiceException {
+ return (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerResponseProto) channel.callBlockingMethod(
+ getDescriptor().getMethods().get(0),
+ controller,
+ request,
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerResponseProto.getDefaultInstance());
+ }
+
+ }
+
+ // @@protoc_insertion_point(class_scope:LlapDaemonProtocol)
+ }
+
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_RunContainerRequestProto_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_RunContainerRequestProto_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_RunContainerResponseProto_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_RunContainerResponseProto_fieldAccessorTable;
+
+ public static com.google.protobuf.Descriptors.FileDescriptor
+ getDescriptor() {
+ return descriptor;
+ }
+ private static com.google.protobuf.Descriptors.FileDescriptor
+ descriptor;
+ static {
+ java.lang.String[] descriptorData = {
+ "\n\030LlapDaemonProtocol.proto\"\330\001\n\030RunContai" +
+ "nerRequestProto\022\033\n\023container_id_string\030\001" +
+ " \001(\t\022\017\n\007am_host\030\002 \001(\t\022\017\n\007am_port\030\003 \001(\005\022\030" +
+ "\n\020token_identifier\030\004 \001(\t\022\032\n\022credentials_" +
+ "binary\030\005 \001(\014\022\014\n\004user\030\006 \001(\t\022\035\n\025applicatio" +
+ "n_id_string\030\007 \001(\t\022\032\n\022app_attempt_number\030" +
+ "\010 \001(\005\"\033\n\031RunContainerResponseProto2[\n\022Ll" +
+ "apDaemonProtocol\022E\n\014runContainer\022\031.RunCo" +
+ "ntainerRequestProto\032\032.RunContainerRespon" +
+ "seProtoBH\n&org.apache.hadoop.hive.llap.d",
+ "aemon.rpcB\030LlapDaemonProtocolProtos\210\001\001\240\001" +
+ "\001"
+ };
+ com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
+ new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
+ public com.google.protobuf.ExtensionRegistry assignDescriptors(
+ com.google.protobuf.Descriptors.FileDescriptor root) {
+ descriptor = root;
+ internal_static_RunContainerRequestProto_descriptor =
+ getDescriptor().getMessageTypes().get(0);
+ internal_static_RunContainerRequestProto_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_RunContainerRequestProto_descriptor,
+ new java.lang.String[] { "ContainerIdString", "AmHost", "AmPort", "TokenIdentifier", "CredentialsBinary", "User", "ApplicationIdString", "AppAttemptNumber", });
+ internal_static_RunContainerResponseProto_descriptor =
+ getDescriptor().getMessageTypes().get(1);
+ internal_static_RunContainerResponseProto_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_RunContainerResponseProto_descriptor,
+ new java.lang.String[] { });
+ return null;
+ }
+ };
+ com.google.protobuf.Descriptors.FileDescriptor
+ .internalBuildGeneratedFileFrom(descriptorData,
+ new com.google.protobuf.Descriptors.FileDescriptor[] {
+ }, assigner);
+ }
+
+ // @@protoc_insertion_point(outer_class_scope)
+}
diff --git llap-server/src/java/org/apache/hadoop/hive/llap/daemon/ContainerRunner.java llap-server/src/java/org/apache/hadoop/hive/llap/daemon/ContainerRunner.java
new file mode 100644
index 0000000..7dc5a51
--- /dev/null
+++ llap-server/src/java/org/apache/hadoop/hive/llap/daemon/ContainerRunner.java
@@ -0,0 +1,24 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.llap.daemon;
+
+import java.io.IOException;
+
+import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos;
+
+public interface ContainerRunner {
+
+ void queueContainer(LlapDaemonProtocolProtos.RunContainerRequestProto request) throws IOException;
+}
diff --git llap-server/src/java/org/apache/hadoop/hive/llap/daemon/LlapDaemonConfiguration.java llap-server/src/java/org/apache/hadoop/hive/llap/daemon/LlapDaemonConfiguration.java
new file mode 100644
index 0000000..73f6a0a
--- /dev/null
+++ llap-server/src/java/org/apache/hadoop/hive/llap/daemon/LlapDaemonConfiguration.java
@@ -0,0 +1,62 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.llap.daemon;
+
+import org.apache.hadoop.conf.Configuration;
+
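+/**
+ * Configuration for the LLAP daemon. In addition to whatever Configuration it
+ * wraps, it loads llap-daemon-site.xml from the classpath.
+ *
+ * Illustrative llap-daemon-site.xml entries (the values below are examples
+ * only, not defaults):
+ *   llap.daemon.work.dirs         = /grid/0/llap,/grid/1/llap
+ *   llap.daemon.rpc.port          = 15001
+ *   llap.daemon.num.executors     = 4
+ *   llap.daemon.yarn.shuffle.port = 15551
+ */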
+public class LlapDaemonConfiguration extends Configuration {
+
+ public LlapDaemonConfiguration(Configuration conf) {
+ super(conf);
+ addResource(LLAP_DAEMON_SITE);
+ }
+
+ public LlapDaemonConfiguration() {
+ super(false);
+ addResource(LLAP_DAEMON_SITE);
+ }
+
+
+ private static final String LLAP_DAEMON_PREFIX = "llap.daemon.";
+ private static final String LLAP_DAEMON_AM_PREFIX = LLAP_DAEMON_PREFIX + "am.";
+ private static final String LLAP_DAEMON_SITE = "llap-daemon-site.xml";
+
+
+
+ public static final String LLAP_DAEMON_RPC_NUM_HANDLERS = LLAP_DAEMON_PREFIX + "rpc.num.handlers";
+ public static final int LLAP_DAEMON_RPC_NUM_HANDLERS_DEFAULT = 5;
+
+ public static final String LLAP_DAEMON_WORK_DIRS = LLAP_DAEMON_PREFIX + "work.dirs";
+
+ public static final String LLAP_DAEMON_YARN_SHUFFLE_PORT = LLAP_DAEMON_PREFIX + "yarn.shuffle.port";
+
+
+ // Section for configs used in AM and executors
+ public static final String LLAP_DAEMON_NUM_EXECUTORS = LLAP_DAEMON_PREFIX + "num.executors";
+ public static final int LLAP_DAEMON_NUM_EXECUTORS_DEFAULT = 4;
+ public static final String LLAP_DAEMON_RPC_PORT = LLAP_DAEMON_PREFIX + "rpc.port";
+ public static final int LLAP_DAEMON_RPC_PORT_DEFAULT = 15001;
+ public static final String LLAP_DAEMON_MEMORY_PER_INSTANCE_MB = LLAP_DAEMON_AM_PREFIX + "memory.per.instance.mb";
+ public static final int LLAP_DAEMON_MEMORY_PER_INSTANCE_MB_DEFAULT = 4096;
+ public static final String LLAP_DAEMON_VCPUS_PER_INSTANCE = LLAP_DAEMON_AM_PREFIX + "vcpus.per.instance";
+ public static final int LLAP_DAEMON_VCPUS_PER_INSTANCE_DEFAULT = 4;
+
+
+ // Section for configs used in the AM //
+ public static final String LLAP_DAEMON_AM_SERVICE_HOSTS = LLAP_DAEMON_AM_PREFIX + "service.hosts";
+ public static final String LLAP_DAEMON_AM_COMMUNICATOR_NUM_THREADS = LLAP_DAEMON_AM_PREFIX + "communicator.num.threads";
+ public static final int LLAP_DAEMON_AM_COMMUNICATOR_NUM_THREADS_DEFAULT = 5;
+
+}
diff --git llap-server/src/java/org/apache/hadoop/hive/llap/daemon/LlapDaemonProtocolBlockingPB.java llap-server/src/java/org/apache/hadoop/hive/llap/daemon/LlapDaemonProtocolBlockingPB.java
new file mode 100644
index 0000000..5ad2344
--- /dev/null
+++ llap-server/src/java/org/apache/hadoop/hive/llap/daemon/LlapDaemonProtocolBlockingPB.java
@@ -0,0 +1,22 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.llap.daemon;
+
+import org.apache.hadoop.ipc.ProtocolInfo;
+import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos;
+
+@ProtocolInfo(protocolName = "org.apache.hadoop.hive.llap.daemon.LlapDaemonProtocolBlockingPB", protocolVersion = 1)
+public interface LlapDaemonProtocolBlockingPB extends LlapDaemonProtocolProtos.LlapDaemonProtocol.BlockingInterface {
+}
diff --git llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/ContainerRunnerImpl.java llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/ContainerRunnerImpl.java
new file mode 100644
index 0000000..9ffe1d8
--- /dev/null
+++ llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/ContainerRunnerImpl.java
@@ -0,0 +1,274 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.llap.daemon.impl;
+
+import java.io.File;
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.atomic.AtomicReference;
+
+import com.google.common.base.Preconditions;
+import com.google.common.base.Stopwatch;
+import com.google.common.util.concurrent.FutureCallback;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
+import com.google.common.util.concurrent.ListeningExecutorService;
+import com.google.common.util.concurrent.MoreExecutors;
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.DataInputBuffer;
+import org.apache.hadoop.security.Credentials;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.service.AbstractService;
+import org.apache.hadoop.yarn.api.ApplicationConstants;
+import org.apache.hadoop.yarn.util.AuxiliaryServiceHelper;
+import org.apache.log4j.Logger;
+import org.apache.tez.common.security.JobTokenIdentifier;
+import org.apache.tez.common.security.TokenCache;
+import org.apache.hadoop.hive.llap.daemon.ContainerRunner;
+import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerRequestProto;
+import org.apache.tez.dag.api.TezConstants;
+import org.apache.tez.runtime.api.ExecutionContext;
+import org.apache.tez.runtime.api.impl.ExecutionContextImpl;
+import org.apache.tez.runtime.common.objectregistry.ObjectRegistryImpl;
+import org.apache.tez.runtime.task.TezChild;
+import org.apache.tez.runtime.task.TezChild.ContainerExecutionResult;
+import org.apache.hadoop.hive.llap.shufflehandler.ShuffleHandler;
+
+public class ContainerRunnerImpl extends AbstractService implements ContainerRunner {
+
+ private static final Logger LOG = Logger.getLogger(ContainerRunnerImpl.class);
+
+ private final int numExecutors;
+ private final ListeningExecutorService executorService;
+ private final AtomicReference<InetSocketAddress> localAddress;
+ private final String[] localDirsBase;
+ private final int localShufflePort;
+ private final Map<String, String> localEnv = new HashMap<String, String>();
+ private volatile FileSystem localFs;
+ private final long memoryPerExecutor;
+ // TODO Support for removing queued containers, interrupting / killing specific containers
+
+ public ContainerRunnerImpl(int numExecutors, String[] localDirsBase, int localShufflePort,
+ AtomicReference<InetSocketAddress> localAddress,
+ long totalMemoryAvailableBytes) {
+ super("ContainerRunnerImpl");
+ Preconditions.checkState(numExecutors > 0,
+ "Invalid number of executors: " + numExecutors + ". Must be > 0");
+ this.numExecutors = numExecutors;
+ this.localDirsBase = localDirsBase;
+ this.localShufflePort = localShufflePort;
+ this.localAddress = localAddress;
+
+ ExecutorService raw = Executors.newFixedThreadPool(numExecutors,
+ new ThreadFactoryBuilder().setNameFormat("ContainerExecutor %d").build());
+ this.executorService = MoreExecutors.listeningDecorator(raw);
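+ // Publish the local shuffle port into the executor environment using the YARN
+ // auxiliary-service convention, so shuffle inputs/outputs can discover it.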
+ AuxiliaryServiceHelper.setServiceDataIntoEnv(
+ TezConstants.TEZ_SHUFFLE_HANDLER_SERVICE_ID,
+ ByteBuffer.allocate(4).putInt(localShufflePort), localEnv);
+
+ // 80% of memory considered for accounted buffers. Rest for objects.
+ // TODO Tune this based on the available size.
+ this.memoryPerExecutor = (long)(totalMemoryAvailableBytes * 0.8 / (float) numExecutors);
+
+ LOG.info("ContainerRunnerImpl config: " +
+ "memoryPerExecutorDerviced=" + memoryPerExecutor
+ );
+ }
+
+ @Override
+ public void serviceInit(Configuration conf) {
+ try {
+ localFs = FileSystem.getLocal(conf);
+ } catch (IOException e) {
+ throw new RuntimeException("Failed to setup local filesystem instance", e);
+ }
+ }
+
+ @Override
+ public void serviceStart() {
+ }
+
+ @Override
+ protected void serviceStop() throws Exception {
+ super.serviceStop();
+ }
+
+ // TODO Move this into a utilities class
+ private static String createAppSpecificLocalDir(String baseDir, String applicationIdString,
+ String user) {
+ // TODO This is broken for secure clusters. The app will not have permission to create these directories.
+ // May work via Slider - since the directory would already exist. Otherwise may need a custom shuffle handler.
+ // TODO This should be the process user - and not the user on behalf of whom the query is being submitted.
+ return baseDir + File.separator + "usercache" + File.separator + user + File.separator +
+ "appcache" + File.separator + applicationIdString;
+ }
+
+ @Override
+ public void queueContainer(RunContainerRequestProto request) throws IOException {
+ LOG.info("Queing container for execution: " + request);
+
+ Map<String, String> env = new HashMap<String, String>();
+ // TODO What else is required in this environment map.
+ env.putAll(localEnv);
+ env.put(ApplicationConstants.Environment.USER.name(), request.getUser());
+
+ String[] localDirs = new String[localDirsBase.length];
+
+ // Setup up local dirs to be application specific, and create them.
+ for (int i = 0; i < localDirsBase.length; i++) {
+ localDirs[i] = createAppSpecificLocalDir(localDirsBase[i], request.getApplicationIdString(),
+ request.getUser());
+ localFs.mkdirs(new Path(localDirs[i]));
+ }
+ LOG.info("DEBUG: Dirs are: " + Arrays.toString(localDirs));
+
+
+ // Set up workingDir. This is otherwise set up as Environment.PWD
+ // Used for re-localization, to add the user-specified configuration (conf_pb_binary_stream)
+ // TODO Set this up to read user configuration if required. Ideally, Inputs / Outputs should be self configured.
+ // Setting this up correctly matters mostly for framework components setting up security, ping intervals, etc.
+ String workingDir = localDirs[0];
+
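+ // Deserialize the credentials shipped in the request; the session (job) token
+ // they contain is registered with the shuffle handler below.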
+ Credentials credentials = new Credentials();
+ DataInputBuffer dib = new DataInputBuffer();
+ byte[] tokenBytes = request.getCredentialsBinary().toByteArray();
+ dib.reset(tokenBytes, tokenBytes.length);
+ credentials.readTokenStorageStream(dib);
+
+ Token<JobTokenIdentifier> jobToken = TokenCache.getSessionToken(credentials);
+
+ // TODO Unregistering does not happen at the moment, since there's no signals on when an app completes.
+ LOG.info("DEBUG: Registering request with the ShuffleHandler");
+ ShuffleHandler.get().registerApplication(request.getApplicationIdString(), jobToken, request.getUser());
+
+
+ ContainerRunnerCallable callable = new ContainerRunnerCallable(request, new Configuration(getConfig()),
+ new ExecutionContextImpl(localAddress.get().getHostName()), env, localDirs,
+ workingDir, credentials, memoryPerExecutor);
+ ListenableFuture<ContainerExecutionResult> future = executorService
+ .submit(callable);
+ Futures.addCallback(future, new ContainerRunnerCallback(request, callable));
+ }
+
+ static class ContainerRunnerCallable implements Callable<ContainerExecutionResult> {
+
+ private final RunContainerRequestProto request;
+ private final Configuration conf;
+ private final String workingDir;
+ private final String[] localDirs;
+ private final Map<String, String> envMap;
+ // TODO Is a null pid valid - will this work with multiple different ResourceMonitors ?
+ private final String pid = null;
+ private final ObjectRegistryImpl objectRegistry;
+ private final ExecutionContext executionContext;
+ private final Credentials credentials;
+ private final long memoryAvailable;
+ private volatile TezChild tezChild;
+
+
+ ContainerRunnerCallable(RunContainerRequestProto request, Configuration conf,
+ ExecutionContext executionContext, Map<String, String> envMap,
+ String[] localDirs, String workingDir, Credentials credentials,
+ long memoryAvailable) {
+ this.request = request;
+ this.conf = conf;
+ this.executionContext = executionContext;
+ this.envMap = envMap;
+ this.workingDir = workingDir;
+ this.localDirs = localDirs;
+ this.objectRegistry = new ObjectRegistryImpl();
+ this.credentials = credentials;
+ this.memoryAvailable = memoryAvailable;
+
+ }
+
+ @Override
+ public ContainerExecutionResult call() throws Exception {
+ Stopwatch sw = new Stopwatch().start();
+ tezChild =
+ new TezChild(conf, request.getAmHost(), request.getAmPort(),
+ request.getContainerIdString(),
+ request.getTokenIdentifier(), request.getAppAttemptNumber(), workingDir, localDirs,
+ envMap, objectRegistry, pid,
+ executionContext, credentials, memoryAvailable, request.getUser());
+ ContainerExecutionResult result = tezChild.run();
+ LOG.info("ExecutionTime for Container: " + request.getContainerIdString() + "=" +
+ sw.stop().elapsedMillis());
+ return result;
+ }
+
+ public TezChild getTezChild() {
+ return this.tezChild;
+ }
+ }
+
+ final class ContainerRunnerCallback implements FutureCallback<ContainerExecutionResult> {
+
+ private final RunContainerRequestProto request;
+ private final ContainerRunnerCallable containerRunnerCallable;
+
+ ContainerRunnerCallback(RunContainerRequestProto request,
+ ContainerRunnerCallable containerRunnerCallable) {
+ this.request = request;
+ this.containerRunnerCallable = containerRunnerCallable;
+ }
+
+ // TODO Slightly more useful error handling
+ @Override
+ public void onSuccess(ContainerExecutionResult result) {
+ switch (result.getExitStatus()) {
+ case SUCCESS:
+ LOG.info("Successfully finished: " + request.getApplicationIdString() + ", containerId=" +
+ request.getContainerIdString());
+ break;
+ case EXECUTION_FAILURE:
+ LOG.info("Failed to run: " + request.getApplicationIdString() + ", containerId=" +
+ request.getContainerIdString(), result.getThrowable());
+ break;
+ case INTERRUPTED:
+ LOG.info(
+ "Interrupted while running: " + request.getApplicationIdString() + ", containerId=" +
+ request.getContainerIdString(), result.getThrowable());
+ break;
+ case ASKED_TO_DIE:
+ LOG.info(
+ "Asked to die while running: " + request.getApplicationIdString() + ", containerId=" +
+ request.getContainerIdString());
+ break;
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable t) {
+ LOG.error(
+ "TezChild execution failed for : " + request.getApplicationIdString() + ", containerId=" +
+ request.getContainerIdString());
+ TezChild tezChild = containerRunnerCallable.getTezChild();
+ if (tezChild != null) {
+ tezChild.shutdown();
+ }
+ }
+ }
+}
diff --git llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemon.java llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemon.java
new file mode 100644
index 0000000..1a66568
--- /dev/null
+++ llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemon.java
@@ -0,0 +1,124 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.llap.daemon.impl;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.util.Arrays;
+import java.util.concurrent.atomic.AtomicReference;
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.service.AbstractService;
+import org.apache.log4j.Logger;
+import org.apache.hadoop.hive.llap.daemon.ContainerRunner;
+import org.apache.hadoop.hive.llap.daemon.LlapDaemonConfiguration;
+import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerRequestProto;
+import org.apache.hadoop.hive.llap.shufflehandler.ShuffleHandler;
+
+public class LlapDaemon extends AbstractService implements ContainerRunner {
+
+ private static final Logger LOG = Logger.getLogger(LlapDaemon.class);
+
+ private final LlapDaemonConfiguration daemonConf;
+ private final int numExecutors;
+ private final int rpcPort;
+ private final LlapDaemonProtocolServerImpl server;
+ private final ContainerRunnerImpl containerRunner;
+ private final String[] localDirs;
+ private final int shufflePort;
+ // TODO Not the best way to share the address
+ private final AtomicReference<InetSocketAddress> address = new AtomicReference<InetSocketAddress>();
+
+ public LlapDaemon(LlapDaemonConfiguration daemonConf) {
+ super("LlapDaemon");
+ // TODO This needs to read TezConfiguration to pick up things like the heartbeat interval from config.
+ // Ideally, this would be part of llap-daemon-configuration
+ this.numExecutors = daemonConf.getInt(LlapDaemonConfiguration.LLAP_DAEMON_NUM_EXECUTORS,
+ LlapDaemonConfiguration.LLAP_DAEMON_NUM_EXECUTORS_DEFAULT);
+ this.rpcPort = daemonConf.getInt(LlapDaemonConfiguration.LLAP_DAEMON_RPC_PORT,
+ LlapDaemonConfiguration.LLAP_DAEMON_RPC_PORT_DEFAULT);
+ this.daemonConf = daemonConf;
+ this.localDirs = daemonConf.getTrimmedStrings(LlapDaemonConfiguration.LLAP_DAEMON_WORK_DIRS);
+ this.shufflePort = daemonConf.getInt(LlapDaemonConfiguration.LLAP_DAEMON_YARN_SHUFFLE_PORT, -1);
+
+ long memoryAvailableBytes = this.daemonConf
+ .getInt(LlapDaemonConfiguration.LLAP_DAEMON_MEMORY_PER_INSTANCE_MB,
+ LlapDaemonConfiguration.LLAP_DAEMON_MEMORY_PER_INSTANCE_MB_DEFAULT) * 1024L * 1024L;
+ long jvmMax = Runtime.getRuntime().maxMemory();
+
+ LOG.info("LlapDaemon started with the following configuration: " +
+ "numExecutors=" + numExecutors +
+ ", rpcListenerPort=" + rpcPort +
+ ", workDirs=" + Arrays.toString(localDirs) +
+ ", shufflePort=" + shufflePort);
+
+ Preconditions.checkArgument(this.numExecutors > 0);
+ Preconditions.checkArgument(this.rpcPort > 1024 && this.rpcPort < 65536,
+ "RPC Port must be between 1025 and 65534");
+ Preconditions.checkArgument(this.localDirs != null && this.localDirs.length > 0,
+ "Work dirs must be specified");
+ Preconditions.checkArgument(this.shufflePort > 0, "ShufflePort must be specified");
+ Preconditions.checkState(jvmMax >= memoryAvailableBytes,
+ "Invalid configuration. Xmx value too small. maxAvailable=" + jvmMax + ", configured=" +
+ memoryAvailableBytes);
+
+ this.server = new LlapDaemonProtocolServerImpl(daemonConf, this, address);
+ this.containerRunner = new ContainerRunnerImpl(numExecutors, localDirs, shufflePort, address,
+ memoryAvailableBytes);
+ }
+
+ @Override
+ public void serviceInit(Configuration conf) {
+ server.init(conf);
+ containerRunner.init(conf);
+ }
+
+ @Override
+ public void serviceStart() {
+ server.start();
+ containerRunner.start();
+
+ }
+
+ @Override
+ public void serviceStop() {
+ containerRunner.stop();
+ server.stop();
+ }
+
+
+ public static void main(String[] args) throws Exception {
+ LlapDaemonConfiguration daemonConf = new LlapDaemonConfiguration();
+
+ Configuration shuffleHandlerConf = new Configuration(daemonConf);
+ shuffleHandlerConf.set(ShuffleHandler.SHUFFLE_HANDLER_LOCAL_DIRS,
+ daemonConf.get(LlapDaemonConfiguration.LLAP_DAEMON_WORK_DIRS));
+ ShuffleHandler.initializeAndStart(shuffleHandlerConf);
+
+ LlapDaemon llapDaemon = new LlapDaemon(daemonConf);
+ // TODO Get the PID - FWIW
+
+ llapDaemon.init(new Configuration());
+ llapDaemon.start();
+ LOG.info("Started LlapDaemon");
+ // Relying on the RPC threads to keep the service alive.
+ }
+
+
+ @Override
+ public void queueContainer(RunContainerRequestProto request) throws IOException {
+ containerRunner.queueContainer(request);
+ }
+}
diff --git llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemonProtocolClientImpl.java llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemonProtocolClientImpl.java
new file mode 100644
index 0000000..3e6fc0a
--- /dev/null
+++ llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemonProtocolClientImpl.java
@@ -0,0 +1,72 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.llap.daemon.impl;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+
+import com.google.protobuf.RpcController;
+import com.google.protobuf.ServiceException;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.ipc.ProtobufRpcEngine;
+import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.hive.llap.daemon.LlapDaemonProtocolBlockingPB;
+import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerRequestProto;
+import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerResponseProto;
+
+// TODO Change all this to be based on a regular interface instead of relying on the Proto service - Exception signatures cannot be controlled without this for the moment.
+
+
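+/**
+ * Blocking client for the LlapDaemonProtocol RPC. A minimal usage sketch
+ * (host, port, and field values below are illustrative only):
+ *
+ *   RunContainerRequestProto request = RunContainerRequestProto.newBuilder()
+ *       .setContainerIdString("container_1_0001_01_000001")
+ *       .setAmHost("am-host").setAmPort(21000)
+ *       .setApplicationIdString("application_1_0001")
+ *       .setUser("hive")
+ *       .build();
+ *   LlapDaemonProtocolClientImpl client =
+ *       new LlapDaemonProtocolClientImpl(conf, "llap-host", 15001);
+ *   client.runContainer(null, request);
+ */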
+public class LlapDaemonProtocolClientImpl implements LlapDaemonProtocolBlockingPB {
+
+ private final Configuration conf;
+ private final InetSocketAddress serverAddr;
+ LlapDaemonProtocolBlockingPB proxy;
+
+
+ public LlapDaemonProtocolClientImpl(Configuration conf, String hostname, int port) {
+ this.conf = conf;
+ this.serverAddr = NetUtils.createSocketAddr(hostname, port);
+ }
+
+ @Override
+ public RunContainerResponseProto runContainer(RpcController controller,
+ RunContainerRequestProto request) throws
+ ServiceException {
+ try {
+ return getProxy().runContainer(null, request);
+ } catch (IOException e) {
+ throw new ServiceException(e);
+ }
+ }
+
+
+ public LlapDaemonProtocolBlockingPB getProxy() throws IOException {
+ if (proxy == null) {
+ proxy = createProxy();
+ }
+ return proxy;
+ }
+
+ public LlapDaemonProtocolBlockingPB createProxy() throws IOException {
+ LlapDaemonProtocolBlockingPB p;
+ // TODO Fix security
+ RPC.setProtocolEngine(conf, LlapDaemonProtocolBlockingPB.class, ProtobufRpcEngine.class);
+ p = (LlapDaemonProtocolBlockingPB) RPC
+ .getProxy(LlapDaemonProtocolBlockingPB.class, 0, serverAddr, conf);
+ return p;
+ }
+}
diff --git llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemonProtocolServerImpl.java llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemonProtocolServerImpl.java
new file mode 100644
index 0000000..4703904
--- /dev/null
+++ llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemonProtocolServerImpl.java
@@ -0,0 +1,129 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.llap.daemon.impl;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.util.concurrent.atomic.AtomicReference;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.protobuf.BlockingService;
+import com.google.protobuf.RpcController;
+import com.google.protobuf.ServiceException;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos;
+import org.apache.hadoop.ipc.ProtobufRpcEngine;
+import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.service.AbstractService;
+import org.apache.hadoop.hive.llap.daemon.ContainerRunner;
+import org.apache.hadoop.hive.llap.daemon.LlapDaemonConfiguration;
+import org.apache.hadoop.hive.llap.daemon.LlapDaemonProtocolBlockingPB;
+import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerRequestProto;
+import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerResponseProto;
+
+public class LlapDaemonProtocolServerImpl extends AbstractService
+ implements LlapDaemonProtocolBlockingPB {
+
+ private static final Log LOG = LogFactory.getLog(LlapDaemonProtocolServerImpl.class);
+
+ private final LlapDaemonConfiguration daemonConf;
+ private final ContainerRunner containerRunner;
+ private RPC.Server server;
+ private final AtomicReference<InetSocketAddress> bindAddress;
+
+
+ public LlapDaemonProtocolServerImpl(LlapDaemonConfiguration daemonConf,
+ ContainerRunner containerRunner,
+ AtomicReference<InetSocketAddress> address) {
+ super("LlapDaemonProtocolServerImpl");
+ this.daemonConf = daemonConf;
+ this.containerRunner = containerRunner;
+ this.bindAddress = address;
+ }
+
+ @Override
+ public RunContainerResponseProto runContainer(RpcController controller,
+ RunContainerRequestProto request) throws
+ ServiceException {
+ LOG.info("Received request: " + request);
+ try {
+ containerRunner.queueContainer(request);
+ } catch (IOException e) {
+ throw new ServiceException(e);
+ }
+ return RunContainerResponseProto.getDefaultInstance();
+ }
+
+
+ @Override
+ public void serviceStart() {
+ Configuration conf = getConfig();
+
+ int numHandlers = daemonConf.getInt(LlapDaemonConfiguration.LLAP_DAEMON_RPC_NUM_HANDLERS,
+ LlapDaemonConfiguration.LLAP_DAEMON_RPC_NUM_HANDLERS_DEFAULT);
+ int port = daemonConf.getInt(LlapDaemonConfiguration.LLAP_DAEMON_RPC_PORT,
+ LlapDaemonConfiguration.LLAP_DAEMON_RPC_PORT_DEFAULT);
+ InetSocketAddress addr = new InetSocketAddress(port);
+ LOG.info("Attempting to start LlapDaemonProtocol on port=" + port + ", with numHandlers=" +
+ numHandlers);
+
+ try {
+ server = createServer(LlapDaemonProtocolBlockingPB.class, addr, conf, numHandlers,
+ LlapDaemonProtocolProtos.LlapDaemonProtocol.newReflectiveBlockingService(this));
+ server.start();
+ } catch (IOException e) {
+ LOG.error("Failed to run RPC Server on port: " + port, e);
+ throw new RuntimeException(e);
+ }
+
+ InetSocketAddress serverBindAddress = NetUtils.getConnectAddress(server);
+ this.bindAddress.set(NetUtils.createSocketAddrForHost(
+ serverBindAddress.getAddress().getCanonicalHostName(),
+ serverBindAddress.getPort()));
+ LOG.info("Instantiated LlapDaemonProtocol at " + bindAddress);
+ }
+
+ @Override
+ public void serviceStop() {
+ if (server != null) {
+ server.stop();
+ }
+ }
+
+ @InterfaceAudience.Private
+ @VisibleForTesting
+ InetSocketAddress getBindAddress() {
+ return this.bindAddress.get();
+ }
+
+ private RPC.Server createServer(Class<?> pbProtocol, InetSocketAddress addr, Configuration conf,
+ int numHandlers, BlockingService blockingService) throws
+ IOException {
+ RPC.setProtocolEngine(conf, pbProtocol, ProtobufRpcEngine.class);
+ RPC.Server server = new RPC.Builder(conf)
+ .setProtocol(pbProtocol)
+ .setInstance(blockingService)
+ .setBindAddress(addr.getHostName())
+ .setPort(addr.getPort())
+ .setNumHandlers(numHandlers)
+ .build();
+ // TODO Add security.
+ return server;
+ }
+}
diff --git llap-server/src/java/org/apache/hadoop/hive/llap/shufflehandler/FadvisedChunkedFile.java llap-server/src/java/org/apache/hadoop/hive/llap/shufflehandler/FadvisedChunkedFile.java
new file mode 100644
index 0000000..b23e25e
--- /dev/null
+++ llap-server/src/java/org/apache/hadoop/hive/llap/shufflehandler/FadvisedChunkedFile.java
@@ -0,0 +1,78 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.llap.shufflehandler;
+
+import java.io.FileDescriptor;
+import java.io.IOException;
+import java.io.RandomAccessFile;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.io.ReadaheadPool;
+import org.apache.hadoop.io.ReadaheadPool.ReadaheadRequest;
+import org.apache.hadoop.io.nativeio.NativeIO;
+import org.jboss.netty.handler.stream.ChunkedFile;
+
+public class FadvisedChunkedFile extends ChunkedFile {
+
+ private static final Log LOG = LogFactory.getLog(FadvisedChunkedFile.class);
+
+ private final boolean manageOsCache;
+ private final int readaheadLength;
+ private final ReadaheadPool readaheadPool;
+ private final FileDescriptor fd;
+ private final String identifier;
+
+ private ReadaheadRequest readaheadRequest;
+
+ public FadvisedChunkedFile(RandomAccessFile file, long position, long count,
+ int chunkSize, boolean manageOsCache, int readaheadLength,
+ ReadaheadPool readaheadPool, String identifier) throws IOException {
+ super(file, position, count, chunkSize);
+ this.manageOsCache = manageOsCache;
+ this.readaheadLength = readaheadLength;
+ this.readaheadPool = readaheadPool;
+ this.fd = file.getFD();
+ this.identifier = identifier;
+ }
+
+ @Override
+ public Object nextChunk() throws Exception {
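+ // Ask the OS to read ahead the next window of the file so that upcoming
+ // chunk reads are served from the page cache.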
+ if (manageOsCache && readaheadPool != null) {
+ readaheadRequest = readaheadPool
+ .readaheadStream(identifier, fd, getCurrentOffset(), readaheadLength,
+ getEndOffset(), readaheadRequest);
+ }
+ return super.nextChunk();
+ }
+
+ @Override
+ public void close() throws Exception {
+ if (readaheadRequest != null) {
+ readaheadRequest.cancel();
+ }
+ if (manageOsCache && getEndOffset() - getStartOffset() > 0) {
+ try {
+ NativeIO.POSIX.getCacheManipulator().posixFadviseIfPossible(identifier,
+ fd,
+ getStartOffset(), getEndOffset() - getStartOffset(),
+ NativeIO.POSIX.POSIX_FADV_DONTNEED);
+ } catch (Throwable t) {
+ LOG.warn("Failed to manage OS cache for " + identifier, t);
+ }
+ }
+ super.close();
+ }
+}
diff --git llap-server/src/java/org/apache/hadoop/hive/llap/shufflehandler/FadvisedFileRegion.java llap-server/src/java/org/apache/hadoop/hive/llap/shufflehandler/FadvisedFileRegion.java
new file mode 100644
index 0000000..69ea363
--- /dev/null
+++ llap-server/src/java/org/apache/hadoop/hive/llap/shufflehandler/FadvisedFileRegion.java
@@ -0,0 +1,160 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.llap.shufflehandler;
+
+import java.io.FileDescriptor;
+import java.io.IOException;
+import java.io.RandomAccessFile;
+import java.nio.ByteBuffer;
+import java.nio.channels.FileChannel;
+import java.nio.channels.WritableByteChannel;
+
+import com.google.common.annotations.VisibleForTesting;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.io.ReadaheadPool;
+import org.apache.hadoop.io.ReadaheadPool.ReadaheadRequest;
+import org.apache.hadoop.io.nativeio.NativeIO;
+import org.jboss.netty.channel.DefaultFileRegion;
+
+public class FadvisedFileRegion extends DefaultFileRegion {
+
+ private static final Log LOG = LogFactory.getLog(FadvisedFileRegion.class);
+
+ private final boolean manageOsCache;
+ private final int readaheadLength;
+ private final ReadaheadPool readaheadPool;
+ private final FileDescriptor fd;
+ private final String identifier;
+ private final long count;
+ private final long position;
+ private final int shuffleBufferSize;
+ private final boolean shuffleTransferToAllowed;
+ private final FileChannel fileChannel;
+
+ private ReadaheadRequest readaheadRequest;
+
+ public FadvisedFileRegion(RandomAccessFile file, long position, long count,
+ boolean manageOsCache, int readaheadLength, ReadaheadPool readaheadPool,
+ String identifier, int shuffleBufferSize,
+ boolean shuffleTransferToAllowed) throws IOException {
+ super(file.getChannel(), position, count);
+ this.manageOsCache = manageOsCache;
+ this.readaheadLength = readaheadLength;
+ this.readaheadPool = readaheadPool;
+ this.fd = file.getFD();
+ this.identifier = identifier;
+ this.fileChannel = file.getChannel();
+ this.count = count;
+ this.position = position;
+ this.shuffleBufferSize = shuffleBufferSize;
+ this.shuffleTransferToAllowed = shuffleTransferToAllowed;
+ }
+
+ @Override
+ public long transferTo(WritableByteChannel target, long position)
+ throws IOException {
+ if (manageOsCache && readaheadPool != null) {
+ readaheadRequest = readaheadPool.readaheadStream(identifier, fd,
+ getPosition() + position, readaheadLength,
+ getPosition() + getCount(), readaheadRequest);
+ }
+
+ if(this.shuffleTransferToAllowed) {
+ return super.transferTo(target, position);
+ } else {
+ return customShuffleTransfer(target, position);
+ }
+ }
+
+ /**
+ * This method transfers data using a local buffer. It transfers data from
+ * disk to a local buffer in memory, and then from that buffer to the
+ * target. This is used only if transferTo is disallowed in the
+ * configuration file. super.transferTo does not perform well on Windows
+ * because of the small IO requests it generates. customShuffleTransfer can control
+ * the size of the IO requests by changing the size of the intermediate
+ * buffer.
+ */
+ @VisibleForTesting
+ long customShuffleTransfer(WritableByteChannel target, long position)
+ throws IOException {
+ long actualCount = this.count - position;
+ if (actualCount < 0 || position < 0) {
+ throw new IllegalArgumentException(
+ "position out of range: " + position +
+ " (expected: 0 - " + (this.count - 1) + ')');
+ }
+ if (actualCount == 0) {
+ return 0L;
+ }
+
+ long trans = actualCount;
+ int readSize;
+ ByteBuffer byteBuffer = ByteBuffer.allocate(this.shuffleBufferSize);
+
+ while(trans > 0L &&
+ (readSize = fileChannel.read(byteBuffer, this.position+position)) > 0) {
+ //adjust counters and buffer limit
+ if(readSize < trans) {
+ trans -= readSize;
+ position += readSize;
+ byteBuffer.flip();
+ } else {
+ //We can read more than we need if actualCount is not a multiple of the
+ //byteBuffer size and the file is big enough. In that case we cannot use
+ //the flip method; instead we set the buffer limit manually to trans.
+ byteBuffer.limit((int)trans);
+ byteBuffer.position(0);
+ position += trans;
+ trans = 0;
+ }
+
+ //write data to the target
+ while(byteBuffer.hasRemaining()) {
+ target.write(byteBuffer);
+ }
+
+ byteBuffer.clear();
+ }
+
+ return actualCount - trans;
+ }
+
+
+ @Override
+ public void releaseExternalResources() {
+ if (readaheadRequest != null) {
+ readaheadRequest.cancel();
+ }
+ super.releaseExternalResources();
+ }
+
+ /**
+ * Call when the transfer completes successfully so we can advise the OS that
+ * we don't need the region to be cached anymore.
+ */
+ public void transferSuccessful() {
+ if (manageOsCache && getCount() > 0) {
+ try {
+ NativeIO.POSIX.getCacheManipulator().posixFadviseIfPossible(identifier,
+ fd, getPosition(), getCount(),
+ NativeIO.POSIX.POSIX_FADV_DONTNEED);
+ } catch (Throwable t) {
+ LOG.warn("Failed to manage OS cache for " + identifier, t);
+ }
+ }
+ }
+}
diff --git llap-server/src/java/org/apache/hadoop/hive/llap/shufflehandler/IndexCache.java llap-server/src/java/org/apache/hadoop/hive/llap/shufflehandler/IndexCache.java
new file mode 100644
index 0000000..a647a55
--- /dev/null
+++ llap-server/src/java/org/apache/hadoop/hive/llap/shufflehandler/IndexCache.java
@@ -0,0 +1,199 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.llap.shufflehandler;
+
+import java.io.IOException;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.tez.runtime.library.common.Constants;
+import org.apache.tez.runtime.library.common.sort.impl.TezIndexRecord;
+import org.apache.tez.runtime.library.common.sort.impl.TezSpillRecord;
+
+class IndexCache {
+
+ private final Configuration conf;
+ private final int totalMemoryAllowed;
+ private AtomicInteger totalMemoryUsed = new AtomicInteger();
+ private static final Log LOG = LogFactory.getLog(IndexCache.class);
+
+  private final ConcurrentHashMap<String, IndexInformation> cache =
+    new ConcurrentHashMap<String, IndexInformation>();
+
+  private final LinkedBlockingQueue<String> queue =
+    new LinkedBlockingQueue<String>();
+
+ public IndexCache(Configuration conf) {
+ this.conf = conf;
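+    // Hard-coded 10 MB ceiling on cached index data; once exceeded, entries are
+    // evicted in FIFO order by freeIndexInformation().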
+ totalMemoryAllowed = 10 * 1024 * 1024;
+ LOG.info("IndexCache created with max memory = " + totalMemoryAllowed);
+ }
+
+ /**
+ * This method gets the index information for the given mapId and reduce.
+ * It reads the index file into cache if it is not already present.
+ * @param mapId
+ * @param reduce
+ * @param fileName The file to read the index information from if it is not
+ * already present in the cache
+ * @param expectedIndexOwner The expected owner of the index file
+ * @return The Index Information
+ * @throws IOException
+ */
+ public TezIndexRecord getIndexInformation(String mapId, int reduce,
+ Path fileName, String expectedIndexOwner)
+ throws IOException {
+
+ IndexInformation info = cache.get(mapId);
+
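+    // An IndexInformation whose mapSpillRecord is still null is "under construction":
+    // another thread is reading the index file, so readers wait on its monitor until
+    // readIndexFileToCache() fills it in and calls notifyAll().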
+ if (info == null) {
+ info = readIndexFileToCache(fileName, mapId, expectedIndexOwner);
+ } else {
+ synchronized(info) {
+ while (isUnderConstruction(info)) {
+ try {
+ info.wait();
+ } catch (InterruptedException e) {
+ throw new IOException("Interrupted waiting for construction", e);
+ }
+ }
+ }
+ LOG.debug("IndexCache HIT: MapId " + mapId + " found");
+ }
+
+ if (info.mapSpillRecord.size() == 0 ||
+ info.mapSpillRecord.size() <= reduce) {
+ throw new IOException("Invalid request " +
+ " Map Id = " + mapId + " Reducer = " + reduce +
+ " Index Info Length = " + info.mapSpillRecord.size());
+ }
+ return info.mapSpillRecord.getIndex(reduce);
+ }
+
+ private boolean isUnderConstruction(IndexInformation info) {
+ synchronized(info) {
+ return (null == info.mapSpillRecord);
+ }
+ }
+
+ private IndexInformation readIndexFileToCache(Path indexFileName,
+ String mapId,
+ String expectedIndexOwner)
+ throws IOException {
+ IndexInformation info;
+ IndexInformation newInd = new IndexInformation();
+ if ((info = cache.putIfAbsent(mapId, newInd)) != null) {
+ synchronized(info) {
+ while (isUnderConstruction(info)) {
+ try {
+ info.wait();
+ } catch (InterruptedException e) {
+ throw new IOException("Interrupted waiting for construction", e);
+ }
+ }
+ }
+ LOG.debug("IndexCache HIT: MapId " + mapId + " found");
+ return info;
+ }
+    LOG.debug("IndexCache MISS: MapId " + mapId + " not found");
+ TezSpillRecord tmp = null;
+ try {
+ tmp = new TezSpillRecord(indexFileName, conf, expectedIndexOwner);
+ } catch (Throwable e) {
+ tmp = new TezSpillRecord(0);
+ cache.remove(mapId);
+ throw new IOException("Error Reading IndexFile", e);
+ } finally {
+ synchronized (newInd) {
+ newInd.mapSpillRecord = tmp;
+ newInd.notifyAll();
+ }
+ }
+ queue.add(mapId);
+
+ if (totalMemoryUsed.addAndGet(newInd.getSize()) > totalMemoryAllowed) {
+ freeIndexInformation();
+ }
+ return newInd;
+ }
+
+ /**
+   * This method removes the map from the cache if the index information for
+   * this map is loaded (size > 0). The entry is not removed while it is still
+   * in the loading phase (size = 0); this prevents corruption of
+   * totalMemoryUsed. It should be called when a map output on this tracker is
+   * discarded.
+ * @param mapId The taskID of this map.
+ */
+ public void removeMap(String mapId) {
+ IndexInformation info = cache.get(mapId);
+    if (info == null || isUnderConstruction(info)) {
+ return;
+ }
+ info = cache.remove(mapId);
+ if (info != null) {
+ totalMemoryUsed.addAndGet(-info.getSize());
+ if (!queue.remove(mapId)) {
+        LOG.warn("Map ID " + mapId + " not found in queue");
+ }
+ } else {
+ LOG.info("Map ID " + mapId + " not found in cache");
+ }
+ }
+
+ /**
+   * This method checks whether the cache and totalMemoryUsed are consistent.
+   * It is only used by unit tests.
+   * @return true if the cache and totalMemoryUsed are consistent
+ */
+ boolean checkTotalMemoryUsed() {
+ int totalSize = 0;
+ for (IndexInformation info : cache.values()) {
+ totalSize += info.getSize();
+ }
+ return totalSize == totalMemoryUsed.get();
+ }
+
+ /**
+ * Bring memory usage below totalMemoryAllowed.
+ */
+ private synchronized void freeIndexInformation() {
+ while (totalMemoryUsed.get() > totalMemoryAllowed) {
+ String s = queue.remove();
+ IndexInformation info = cache.remove(s);
+ if (info != null) {
+ totalMemoryUsed.addAndGet(-info.getSize());
+ }
+ }
+ }
+
+ private static class IndexInformation {
+ TezSpillRecord mapSpillRecord;
+
+ int getSize() {
+ return mapSpillRecord == null
+ ? 0
+ : mapSpillRecord.size() * Constants.MAP_OUTPUT_INDEX_RECORD_LENGTH;
+ }
+ }
+}
diff --git llap-server/src/java/org/apache/hadoop/hive/llap/shufflehandler/ShuffleHandler.java llap-server/src/java/org/apache/hadoop/hive/llap/shufflehandler/ShuffleHandler.java
new file mode 100644
index 0000000..ed9a51d
--- /dev/null
+++ llap-server/src/java/org/apache/hadoop/hive/llap/shufflehandler/ShuffleHandler.java
@@ -0,0 +1,836 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.llap.shufflehandler;
+
+import static org.jboss.netty.buffer.ChannelBuffers.wrappedBuffer;
+import static org.jboss.netty.handler.codec.http.HttpHeaders.Names.CONTENT_TYPE;
+import static org.jboss.netty.handler.codec.http.HttpMethod.GET;
+import static org.jboss.netty.handler.codec.http.HttpResponseStatus.BAD_REQUEST;
+import static org.jboss.netty.handler.codec.http.HttpResponseStatus.FORBIDDEN;
+import static org.jboss.netty.handler.codec.http.HttpResponseStatus.INTERNAL_SERVER_ERROR;
+import static org.jboss.netty.handler.codec.http.HttpResponseStatus.METHOD_NOT_ALLOWED;
+import static org.jboss.netty.handler.codec.http.HttpResponseStatus.NOT_FOUND;
+import static org.jboss.netty.handler.codec.http.HttpResponseStatus.OK;
+import static org.jboss.netty.handler.codec.http.HttpResponseStatus.UNAUTHORIZED;
+import static org.jboss.netty.handler.codec.http.HttpVersion.HTTP_1_1;
+
+import javax.crypto.SecretKey;
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.RandomAccessFile;
+import java.net.InetSocketAddress;
+import java.net.URL;
+import java.nio.ByteBuffer;
+import java.nio.channels.ClosedChannelException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ThreadFactory;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.regex.Pattern;
+
+import com.google.common.base.Charsets;
+import com.google.common.base.Preconditions;
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.LocalDirAllocator;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.DataInputByteBuffer;
+import org.apache.hadoop.io.DataOutputBuffer;
+import org.apache.hadoop.io.ReadaheadPool;
+import org.apache.hadoop.io.SecureIOUtils;
+import org.apache.hadoop.metrics2.annotation.Metric;
+import org.apache.hadoop.metrics2.annotation.Metrics;
+import org.apache.hadoop.metrics2.lib.MutableCounterInt;
+import org.apache.hadoop.metrics2.lib.MutableCounterLong;
+import org.apache.hadoop.metrics2.lib.MutableGaugeInt;
+import org.apache.hadoop.security.ssl.SSLFactory;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.util.Shell;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.util.ConverterUtils;
+import org.apache.tez.common.security.JobTokenIdentifier;
+import org.apache.tez.common.security.JobTokenSecretManager;
+import org.apache.tez.runtime.library.common.security.SecureShuffleUtils;
+import org.apache.tez.runtime.library.common.shuffle.orderedgrouped.ShuffleHeader;
+import org.apache.tez.runtime.library.common.sort.impl.TezIndexRecord;
+import org.jboss.netty.bootstrap.ServerBootstrap;
+import org.jboss.netty.buffer.ChannelBuffers;
+import org.jboss.netty.channel.Channel;
+import org.jboss.netty.channel.ChannelFactory;
+import org.jboss.netty.channel.ChannelFuture;
+import org.jboss.netty.channel.ChannelFutureListener;
+import org.jboss.netty.channel.ChannelHandlerContext;
+import org.jboss.netty.channel.ChannelPipeline;
+import org.jboss.netty.channel.ChannelPipelineFactory;
+import org.jboss.netty.channel.ChannelStateEvent;
+import org.jboss.netty.channel.Channels;
+import org.jboss.netty.channel.ExceptionEvent;
+import org.jboss.netty.channel.MessageEvent;
+import org.jboss.netty.channel.SimpleChannelUpstreamHandler;
+import org.jboss.netty.channel.group.ChannelGroup;
+import org.jboss.netty.channel.group.DefaultChannelGroup;
+import org.jboss.netty.channel.socket.nio.NioServerSocketChannelFactory;
+import org.jboss.netty.handler.codec.frame.TooLongFrameException;
+import org.jboss.netty.handler.codec.http.DefaultHttpResponse;
+import org.jboss.netty.handler.codec.http.HttpChunkAggregator;
+import org.jboss.netty.handler.codec.http.HttpHeaders;
+import org.jboss.netty.handler.codec.http.HttpRequest;
+import org.jboss.netty.handler.codec.http.HttpRequestDecoder;
+import org.jboss.netty.handler.codec.http.HttpResponse;
+import org.jboss.netty.handler.codec.http.HttpResponseEncoder;
+import org.jboss.netty.handler.codec.http.HttpResponseStatus;
+import org.jboss.netty.handler.codec.http.QueryStringDecoder;
+import org.jboss.netty.handler.ssl.SslHandler;
+import org.jboss.netty.handler.stream.ChunkedWriteHandler;
+import org.jboss.netty.util.CharsetUtil;
+
+public class ShuffleHandler {
+
+ private static final Log LOG = LogFactory.getLog(ShuffleHandler.class);
+
+ public static final String SHUFFLE_HANDLER_LOCAL_DIRS = "tez.shuffle.handler.local-dirs";
+
+ public static final String SHUFFLE_MANAGE_OS_CACHE = "mapreduce.shuffle.manage.os.cache";
+ public static final boolean DEFAULT_SHUFFLE_MANAGE_OS_CACHE = true;
+
+ public static final String SHUFFLE_READAHEAD_BYTES = "mapreduce.shuffle.readahead.bytes";
+ public static final int DEFAULT_SHUFFLE_READAHEAD_BYTES = 4 * 1024 * 1024;
+
+ // pattern to identify errors related to the client closing the socket early
+ // idea borrowed from Netty SslHandler
+ private static final Pattern IGNORABLE_ERROR_MESSAGE = Pattern.compile(
+ "^.*(?:connection.*reset|connection.*closed|broken.*pipe).*$",
+ Pattern.CASE_INSENSITIVE);
+
+ private int port;
+ private final ChannelFactory selector;
+ private final ChannelGroup accepted = new DefaultChannelGroup();
+ protected HttpPipelineFactory pipelineFact;
+ private final int sslFileBufferSize;
+ private final Configuration conf;
+
+  private final ConcurrentMap<String, Boolean> registeredApps =
+      new ConcurrentHashMap<String, Boolean>();
+
+ /**
+ * Should the shuffle use posix_fadvise calls to manage the OS cache during
+ * sendfile
+ */
+ private final boolean manageOsCache;
+ private final int readaheadLength;
+ private final int maxShuffleConnections;
+ private final int shuffleBufferSize;
+ private final boolean shuffleTransferToAllowed;
+ private final ReadaheadPool readaheadPool = ReadaheadPool.getInstance();
+
+  private Map<String, String> userRsrc;
+ private JobTokenSecretManager secretManager;
+
+ // TODO Fix this for tez.
+ public static final String MAPREDUCE_SHUFFLE_SERVICEID =
+ "mapreduce_shuffle";
+
+ public static final String SHUFFLE_PORT_CONFIG_KEY = "tez.shuffle.port";
+ public static final int DEFAULT_SHUFFLE_PORT = 15551;
+
+ // TODO Change configs to remove mapreduce references.
+ public static final String SHUFFLE_CONNECTION_KEEP_ALIVE_ENABLED =
+ "mapreduce.shuffle.connection-keep-alive.enable";
+ public static final boolean DEFAULT_SHUFFLE_CONNECTION_KEEP_ALIVE_ENABLED = false;
+
+ public static final String SHUFFLE_CONNECTION_KEEP_ALIVE_TIME_OUT =
+ "mapreduce.shuffle.connection-keep-alive.timeout";
+ public static final int DEFAULT_SHUFFLE_CONNECTION_KEEP_ALIVE_TIME_OUT = 5; //seconds
+
+ public static final String SHUFFLE_MAPOUTPUT_META_INFO_CACHE_SIZE =
+ "mapreduce.shuffle.mapoutput-info.meta.cache.size";
+ public static final int DEFAULT_SHUFFLE_MAPOUTPUT_META_INFO_CACHE_SIZE =
+ 1000;
+
+ public static final String CONNECTION_CLOSE = "close";
+
+ public static final String SUFFLE_SSL_FILE_BUFFER_SIZE_KEY =
+ "mapreduce.shuffle.ssl.file.buffer.size";
+
+ public static final int DEFAULT_SUFFLE_SSL_FILE_BUFFER_SIZE = 60 * 1024;
+
+ public static final String MAX_SHUFFLE_CONNECTIONS = "mapreduce.shuffle.max.connections";
+ public static final int DEFAULT_MAX_SHUFFLE_CONNECTIONS = 0; // 0 implies no limit
+
+ public static final String MAX_SHUFFLE_THREADS = "mapreduce.shuffle.max.threads";
+ // 0 implies Netty default of 2 * number of available processors
+ public static final int DEFAULT_MAX_SHUFFLE_THREADS = 0;
+
+ public static final String SHUFFLE_BUFFER_SIZE =
+ "mapreduce.shuffle.transfer.buffer.size";
+ public static final int DEFAULT_SHUFFLE_BUFFER_SIZE = 128 * 1024;
+
+ public static final String SHUFFLE_TRANSFERTO_ALLOWED =
+ "mapreduce.shuffle.transferTo.allowed";
+ public static final boolean DEFAULT_SHUFFLE_TRANSFERTO_ALLOWED = true;
+ public static final boolean WINDOWS_DEFAULT_SHUFFLE_TRANSFERTO_ALLOWED =
+ false;
+
+ final boolean connectionKeepAliveEnabled;
+ final int connectionKeepAliveTimeOut;
+ final int mapOutputMetaInfoCacheSize;
+ private static final AtomicBoolean started = new AtomicBoolean(false);
+ private static final AtomicBoolean initing = new AtomicBoolean(false);
+ private static ShuffleHandler INSTANCE;
+
+ @Metrics(about="Shuffle output metrics", context="mapred")
+ static class ShuffleMetrics implements ChannelFutureListener {
+ @Metric("Shuffle output in bytes")
+ MutableCounterLong shuffleOutputBytes;
+ @Metric("# of failed shuffle outputs")
+ MutableCounterInt shuffleOutputsFailed;
+    @Metric("# of succeeded shuffle outputs")
+ MutableCounterInt shuffleOutputsOK;
+ @Metric("# of current shuffle connections")
+ MutableGaugeInt shuffleConnections;
+
+ @Override
+ public void operationComplete(ChannelFuture future) throws Exception {
+ if (future.isSuccess()) {
+ shuffleOutputsOK.incr();
+ } else {
+ shuffleOutputsFailed.incr();
+ }
+ shuffleConnections.decr();
+ }
+ }
+
+ public ShuffleHandler(Configuration conf) {
+ this.conf = conf;
+ manageOsCache = conf.getBoolean(SHUFFLE_MANAGE_OS_CACHE,
+ DEFAULT_SHUFFLE_MANAGE_OS_CACHE);
+
+ readaheadLength = conf.getInt(SHUFFLE_READAHEAD_BYTES,
+ DEFAULT_SHUFFLE_READAHEAD_BYTES);
+
+ maxShuffleConnections = conf.getInt(MAX_SHUFFLE_CONNECTIONS,
+ DEFAULT_MAX_SHUFFLE_CONNECTIONS);
+ int maxShuffleThreads = conf.getInt(MAX_SHUFFLE_THREADS,
+ DEFAULT_MAX_SHUFFLE_THREADS);
+ if (maxShuffleThreads == 0) {
+ maxShuffleThreads = 2 * Runtime.getRuntime().availableProcessors();
+ }
+
+ shuffleBufferSize = conf.getInt(SHUFFLE_BUFFER_SIZE,
+ DEFAULT_SHUFFLE_BUFFER_SIZE);
+
+ shuffleTransferToAllowed = conf.getBoolean(SHUFFLE_TRANSFERTO_ALLOWED,
+ (Shell.WINDOWS)?WINDOWS_DEFAULT_SHUFFLE_TRANSFERTO_ALLOWED:
+ DEFAULT_SHUFFLE_TRANSFERTO_ALLOWED);
+
+ ThreadFactory bossFactory = new ThreadFactoryBuilder()
+ .setNameFormat("ShuffleHandler Netty Boss #%d")
+ .build();
+ ThreadFactory workerFactory = new ThreadFactoryBuilder()
+ .setNameFormat("ShuffleHandler Netty Worker #%d")
+ .build();
+
+ selector = new NioServerSocketChannelFactory(
+ Executors.newCachedThreadPool(bossFactory),
+ Executors.newCachedThreadPool(workerFactory),
+ maxShuffleThreads);
+
+ sslFileBufferSize = conf.getInt(SUFFLE_SSL_FILE_BUFFER_SIZE_KEY,
+ DEFAULT_SUFFLE_SSL_FILE_BUFFER_SIZE);
+ connectionKeepAliveEnabled =
+ conf.getBoolean(SHUFFLE_CONNECTION_KEEP_ALIVE_ENABLED,
+ DEFAULT_SHUFFLE_CONNECTION_KEEP_ALIVE_ENABLED);
+ connectionKeepAliveTimeOut =
+ Math.max(1, conf.getInt(SHUFFLE_CONNECTION_KEEP_ALIVE_TIME_OUT,
+ DEFAULT_SHUFFLE_CONNECTION_KEEP_ALIVE_TIME_OUT));
+ mapOutputMetaInfoCacheSize =
+ Math.max(1, conf.getInt(SHUFFLE_MAPOUTPUT_META_INFO_CACHE_SIZE,
+ DEFAULT_SHUFFLE_MAPOUTPUT_META_INFO_CACHE_SIZE));
+
+    userRsrc = new ConcurrentHashMap<String, String>();
+ secretManager = new JobTokenSecretManager();
+ }
+
+
+ public void start() throws Exception {
+ ServerBootstrap bootstrap = new ServerBootstrap(selector);
+ try {
+ pipelineFact = new HttpPipelineFactory(conf);
+ } catch (Exception ex) {
+ throw new RuntimeException(ex);
+ }
+ bootstrap.setPipelineFactory(pipelineFact);
+ port = conf.getInt(SHUFFLE_PORT_CONFIG_KEY, DEFAULT_SHUFFLE_PORT);
+ Channel ch = bootstrap.bind(new InetSocketAddress(port));
+ accepted.add(ch);
+ port = ((InetSocketAddress)ch.getLocalAddress()).getPort();
+ conf.set(SHUFFLE_PORT_CONFIG_KEY, Integer.toString(port));
+ pipelineFact.SHUFFLE.setPort(port);
+ LOG.info("TezShuffleHandler" + " listening on port " + port);
+ }
+
+ public static void initializeAndStart(Configuration conf) throws Exception {
+ if (!initing.getAndSet(true)) {
+ INSTANCE = new ShuffleHandler(conf);
+ INSTANCE.start();
+ started.set(true);
+ }
+ }
+
+ public static ShuffleHandler get() {
+    Preconditions.checkState(started.get(), "ShuffleHandler must be started before invoking get()");
+ return INSTANCE;
+ }
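+  // Intended usage (illustrative): call ShuffleHandler.initializeAndStart(conf) once during
+  // daemon startup, then use ShuffleHandler.get() wherever applications need to be
+  // registered or unregistered.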
+
+ /**
+ * Serialize the shuffle port into a ByteBuffer for use later on.
+   * @param port the port to be sent to the ApplicationMaster
+ * @return the serialized form of the port.
+ */
+ public static ByteBuffer serializeMetaData(int port) throws IOException {
+ //TODO these bytes should be versioned
+ DataOutputBuffer port_dob = new DataOutputBuffer();
+ port_dob.writeInt(port);
+ return ByteBuffer.wrap(port_dob.getData(), 0, port_dob.getLength());
+ }
+
+ /**
+ * A helper function to deserialize the metadata returned by ShuffleHandler.
+ * @param meta the metadata returned by the ShuffleHandler
+ * @return the port the Shuffle Handler is listening on to serve shuffle data.
+ */
+ public static int deserializeMetaData(ByteBuffer meta) throws IOException {
+ //TODO this should be returning a class not just an int
+ DataInputByteBuffer in = new DataInputByteBuffer();
+ in.reset(meta);
+ int port = in.readInt();
+ return port;
+ }
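+  // serializeMetaData/deserializeMetaData form a round trip (illustrative): the handler
+  // publishes serializeMetaData(port) as service metadata, and a client recovers the
+  // port with deserializeMetaData(buffer).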
+
+ /**
+ * A helper function to serialize the JobTokenIdentifier to be sent to the
+ * ShuffleHandler as ServiceData.
+ * @param jobToken the job token to be used for authentication of
+ * shuffle data requests.
+ * @return the serialized version of the jobToken.
+ */
+  public static ByteBuffer serializeServiceData(Token<JobTokenIdentifier> jobToken)
+      throws IOException {
+ //TODO these bytes should be versioned
+ DataOutputBuffer jobToken_dob = new DataOutputBuffer();
+ jobToken.write(jobToken_dob);
+ return ByteBuffer.wrap(jobToken_dob.getData(), 0, jobToken_dob.getLength());
+ }
+
+  static Token<JobTokenIdentifier> deserializeServiceData(ByteBuffer secret) throws IOException {
+    DataInputByteBuffer in = new DataInputByteBuffer();
+    in.reset(secret);
+    Token<JobTokenIdentifier> jt = new Token<JobTokenIdentifier>();
+ jt.readFields(in);
+ return jt;
+ }
+
+  public void registerApplication(String applicationIdString, Token<JobTokenIdentifier> appToken,
+ String user) {
+ Boolean registered = registeredApps.putIfAbsent(applicationIdString, Boolean.valueOf(true));
+ if (registered == null) {
+ recordJobShuffleInfo(applicationIdString, user, appToken);
+ }
+ }
+
+ public void unregisterApplication(String applicationIdString) {
+ removeJobShuffleInfo(applicationIdString);
+ }
+
+
+ protected void stop() throws Exception {
+ accepted.close().awaitUninterruptibly(10, TimeUnit.SECONDS);
+ if (selector != null) {
+ ServerBootstrap bootstrap = new ServerBootstrap(selector);
+ bootstrap.releaseExternalResources();
+ }
+ if (pipelineFact != null) {
+ pipelineFact.destroy();
+ }
+ }
+
+ protected Shuffle getShuffle(Configuration conf) {
+ return new Shuffle(conf);
+ }
+
+
+ private void addJobToken(String appIdString, String user,
+      Token<JobTokenIdentifier> jobToken) {
+ String jobIdString = appIdString.replace("application", "job");
+ userRsrc.put(jobIdString, user);
+ secretManager.addTokenForJob(jobIdString, jobToken);
+ LOG.info("Added token for " + jobIdString);
+ }
+
+ private void recordJobShuffleInfo(String appIdString, String user,
+      Token<JobTokenIdentifier> jobToken) {
+ addJobToken(appIdString, user, jobToken);
+ }
+
+ private void removeJobShuffleInfo(String appIdString) {
+ secretManager.removeTokenForJob(appIdString);
+ userRsrc.remove(appIdString);
+ }
+
+ class HttpPipelineFactory implements ChannelPipelineFactory {
+
+ final Shuffle SHUFFLE;
+ private SSLFactory sslFactory;
+
+ public HttpPipelineFactory(Configuration conf) throws Exception {
+ SHUFFLE = getShuffle(conf);
+ // TODO Setup SSL Shuffle
+// if (conf.getBoolean(MRConfig.SHUFFLE_SSL_ENABLED_KEY,
+// MRConfig.SHUFFLE_SSL_ENABLED_DEFAULT)) {
+// LOG.info("Encrypted shuffle is enabled.");
+// sslFactory = new SSLFactory(SSLFactory.Mode.SERVER, conf);
+// sslFactory.init();
+// }
+ }
+
+ public void destroy() {
+ if (sslFactory != null) {
+ sslFactory.destroy();
+ }
+ }
+
+ @Override
+ public ChannelPipeline getPipeline() throws Exception {
+ ChannelPipeline pipeline = Channels.pipeline();
+ if (sslFactory != null) {
+ pipeline.addLast("ssl", new SslHandler(sslFactory.createSSLEngine()));
+ }
+ pipeline.addLast("decoder", new HttpRequestDecoder());
+ pipeline.addLast("aggregator", new HttpChunkAggregator(1 << 16));
+ pipeline.addLast("encoder", new HttpResponseEncoder());
+ pipeline.addLast("chunking", new ChunkedWriteHandler());
+ pipeline.addLast("shuffle", SHUFFLE);
+ return pipeline;
+ // TODO factor security manager into pipeline
+ // TODO factor out encode/decode to permit binary shuffle
+ // TODO factor out decode of index to permit alt. models
+ }
+
+ }
+
+ class Shuffle extends SimpleChannelUpstreamHandler {
+
+ private final Configuration conf;
+ private final IndexCache indexCache;
+ private final LocalDirAllocator lDirAlloc =
+ new LocalDirAllocator(SHUFFLE_HANDLER_LOCAL_DIRS);
+ private int port;
+
+ public Shuffle(Configuration conf) {
+ this.conf = conf;
+ indexCache = new IndexCache(conf);
+ this.port = conf.getInt(SHUFFLE_PORT_CONFIG_KEY, DEFAULT_SHUFFLE_PORT);
+ }
+
+ public void setPort(int port) {
+ this.port = port;
+ }
+
+    private List<String> splitMaps(List<String> mapq) {
+ if (null == mapq) {
+ return null;
+ }
+      final List<String> ret = new ArrayList<String>();
+ for (String s : mapq) {
+ Collections.addAll(ret, s.split(","));
+ }
+ return ret;
+ }
+
+ @Override
+ public void channelOpen(ChannelHandlerContext ctx, ChannelStateEvent evt)
+ throws Exception {
+ if ((maxShuffleConnections > 0) && (accepted.size() >= maxShuffleConnections)) {
+ LOG.info(String.format("Current number of shuffle connections (%d) is " +
+ "greater than or equal to the max allowed shuffle connections (%d)",
+ accepted.size(), maxShuffleConnections));
+ evt.getChannel().close();
+ return;
+ }
+ accepted.add(evt.getChannel());
+ super.channelOpen(ctx, evt);
+
+ }
+
+ @Override
+ public void messageReceived(ChannelHandlerContext ctx, MessageEvent evt)
+ throws Exception {
+ HttpRequest request = (HttpRequest) evt.getMessage();
+ if (request.getMethod() != GET) {
+ sendError(ctx, METHOD_NOT_ALLOWED);
+ return;
+ }
+ // Check whether the shuffle version is compatible
+ if (!ShuffleHeader.DEFAULT_HTTP_HEADER_NAME.equals(
+ request.getHeader(ShuffleHeader.HTTP_HEADER_NAME))
+ || !ShuffleHeader.DEFAULT_HTTP_HEADER_VERSION.equals(
+ request.getHeader(ShuffleHeader.HTTP_HEADER_VERSION))) {
+        sendError(ctx, "Incompatible shuffle request version", BAD_REQUEST);
+        return;
+      }
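+      // A fetch request is an HTTP GET whose query string carries the job id, the target
+      // reduce partition, a comma-separated list of map attempt ids, and an optional
+      // keepAlive flag, e.g. (illustrative):
+      //   ?job=job_1430000000000_0001&reduce=3&map=attempt_..._m_000001_0,attempt_..._m_000002_0&keepAlive=true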
+      final Map<String, List<String>> q =
+        new QueryStringDecoder(request.getUri()).getParameters();
+      final List<String> keepAliveList = q.get("keepAlive");
+ boolean keepAliveParam = false;
+ if (keepAliveList != null && keepAliveList.size() == 1) {
+ keepAliveParam = Boolean.valueOf(keepAliveList.get(0));
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("KeepAliveParam : " + keepAliveList
+ + " : " + keepAliveParam);
+ }
+ }
+      final List<String> mapIds = splitMaps(q.get("map"));
+      final List<String> reduceQ = q.get("reduce");
+      final List<String> jobQ = q.get("job");
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("RECV: " + request.getUri() +
+ "\n mapId: " + mapIds +
+ "\n reduceId: " + reduceQ +
+ "\n jobId: " + jobQ +
+ "\n keepAlive: " + keepAliveParam);
+ }
+
+ if (mapIds == null || reduceQ == null || jobQ == null) {
+ sendError(ctx, "Required param job, map and reduce", BAD_REQUEST);
+ return;
+ }
+ if (reduceQ.size() != 1 || jobQ.size() != 1) {
+ sendError(ctx, "Too many job/reduce parameters", BAD_REQUEST);
+ return;
+ }
+ int reduceId;
+ String jobId;
+ try {
+ reduceId = Integer.parseInt(reduceQ.get(0));
+ jobId = jobQ.get(0);
+ } catch (NumberFormatException e) {
+ sendError(ctx, "Bad reduce parameter", BAD_REQUEST);
+ return;
+ } catch (IllegalArgumentException e) {
+ sendError(ctx, "Bad job parameter", BAD_REQUEST);
+ return;
+ }
+ final String reqUri = request.getUri();
+ if (null == reqUri) {
+ // TODO? add upstream?
+ sendError(ctx, FORBIDDEN);
+ return;
+ }
+ HttpResponse response = new DefaultHttpResponse(HTTP_1_1, OK);
+ try {
+ verifyRequest(jobId, ctx, request, response,
+ new URL("http", "", this.port, reqUri));
+ } catch (IOException e) {
+ LOG.warn("Shuffle failure ", e);
+ sendError(ctx, e.getMessage(), UNAUTHORIZED);
+ return;
+ }
+
+      Map<String, MapOutputInfo> mapOutputInfoMap =
+          new HashMap<String, MapOutputInfo>();
+ Channel ch = evt.getChannel();
+ String user = userRsrc.get(jobId);
+
+ // $x/$user/appcache/$appId/output/$mapId
+ // TODO: Once Shuffle is out of NM, this can use MR APIs to convert
+ // between App and Job
+ String outputBasePathStr = getBaseLocation(jobId, user);
+
+ try {
+ populateHeaders(mapIds, outputBasePathStr, user, reduceId, request,
+ response, keepAliveParam, mapOutputInfoMap);
+ } catch(IOException e) {
+ ch.write(response);
+ LOG.error("Shuffle error in populating headers :", e);
+ String errorMessage = getErrorMessage(e);
+        sendError(ctx, errorMessage, INTERNAL_SERVER_ERROR);
+ return;
+ }
+ ch.write(response);
+ // TODO refactor the following into the pipeline
+ ChannelFuture lastMap = null;
+ for (String mapId : mapIds) {
+ try {
+ MapOutputInfo info = mapOutputInfoMap.get(mapId);
+ if (info == null) {
+ info = getMapOutputInfo(outputBasePathStr, mapId, reduceId, user);
+ }
+ lastMap =
+ sendMapOutput(ctx, ch, user, mapId,
+ reduceId, info);
+ if (null == lastMap) {
+ sendError(ctx, NOT_FOUND);
+ return;
+ }
+ } catch (IOException e) {
+ LOG.error("Shuffle error :", e);
+ String errorMessage = getErrorMessage(e);
+          sendError(ctx, errorMessage, INTERNAL_SERVER_ERROR);
+ return;
+ }
+ }
+ lastMap.addListener(ChannelFutureListener.CLOSE);
+ }
+
+ private String getErrorMessage(Throwable t) {
+ StringBuffer sb = new StringBuffer(t.getMessage());
+ while (t.getCause() != null) {
+ sb.append(t.getCause().getMessage());
+ t = t.getCause();
+ }
+ return sb.toString();
+ }
+
+ private final String USERCACHE_CONSTANT = "usercache";
+ private final String APPCACHE_CONSTANT = "appcache";
+
+ private String getBaseLocation(String jobIdString, String user) {
+ String parts[] = jobIdString.split("_");
+ Preconditions.checkArgument(parts.length == 3, "Invalid jobId. Expecting 3 parts");
+ final ApplicationId appID =
+ ApplicationId.newInstance(Long.parseLong(parts[1]), Integer.parseInt(parts[2]));
+ final String baseStr =
+ USERCACHE_CONSTANT + "/" + user + "/"
+ + APPCACHE_CONSTANT + "/"
+ + ConverterUtils.toString(appID) + "/output" + "/";
+ return baseStr;
+ }
+
+ protected MapOutputInfo getMapOutputInfo(String base, String mapId,
+ int reduce, String user) throws IOException {
+ // Index file
+ Path indexFileName =
+ lDirAlloc.getLocalPathToRead(base + "/file.out.index", conf);
+ TezIndexRecord info =
+ indexCache.getIndexInformation(mapId, reduce, indexFileName, user);
+
+ Path mapOutputFileName =
+ lDirAlloc.getLocalPathToRead(base + "/file.out", conf);
+ if (LOG.isDebugEnabled()) {
+ LOG.debug(base + " : " + mapOutputFileName + " : " + indexFileName);
+ }
+ MapOutputInfo outputInfo = new MapOutputInfo(mapOutputFileName, info);
+ return outputInfo;
+ }
+
+    protected void populateHeaders(List<String> mapIds, String outputBaseStr,
+        String user, int reduce, HttpRequest request, HttpResponse response,
+        boolean keepAliveParam, Map<String, MapOutputInfo> mapOutputInfoMap)
+ throws IOException {
+
+ long contentLength = 0;
+ for (String mapId : mapIds) {
+ String base = outputBaseStr + mapId;
+ MapOutputInfo outputInfo = getMapOutputInfo(base, mapId, reduce, user);
+ if (mapOutputInfoMap.size() < mapOutputMetaInfoCacheSize) {
+ mapOutputInfoMap.put(mapId, outputInfo);
+ }
+ // Index file
+ Path indexFileName =
+ lDirAlloc.getLocalPathToRead(base + "/file.out.index", conf);
+ TezIndexRecord info =
+ indexCache.getIndexInformation(mapId, reduce, indexFileName, user);
+ ShuffleHeader header =
+ new ShuffleHeader(mapId, info.getPartLength(), info.getRawLength(), reduce);
+ DataOutputBuffer dob = new DataOutputBuffer();
+ header.write(dob);
+
+ contentLength += info.getPartLength();
+ contentLength += dob.getLength();
+ }
+
+ // Now set the response headers.
+ setResponseHeaders(response, keepAliveParam, contentLength);
+ }
+
+ protected void setResponseHeaders(HttpResponse response,
+ boolean keepAliveParam, long contentLength) {
+ if (!connectionKeepAliveEnabled && !keepAliveParam) {
+ LOG.info("Setting connection close header...");
+ response.setHeader(HttpHeaders.Names.CONNECTION, CONNECTION_CLOSE);
+ } else {
+ response.setHeader(HttpHeaders.Names.CONTENT_LENGTH,
+ String.valueOf(contentLength));
+ response.setHeader(HttpHeaders.Names.CONNECTION, HttpHeaders.Values.KEEP_ALIVE);
+ response.setHeader(HttpHeaders.Values.KEEP_ALIVE, "timeout="
+ + connectionKeepAliveTimeOut);
+ LOG.info("Content Length in shuffle : " + contentLength);
+ }
+ }
+
+ class MapOutputInfo {
+ final Path mapOutputFileName;
+ final TezIndexRecord indexRecord;
+
+ MapOutputInfo(Path mapOutputFileName, TezIndexRecord indexRecord) {
+ this.mapOutputFileName = mapOutputFileName;
+ this.indexRecord = indexRecord;
+ }
+ }
+
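+    // Authentication summary: the fetcher signs its request URL with the job token secret;
+    // verifyRequest checks that hash and answers with a hash of the fetcher's hash, so the
+    // fetcher can in turn authenticate this handler.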
+ protected void verifyRequest(String appid, ChannelHandlerContext ctx,
+ HttpRequest request, HttpResponse response, URL requestUri)
+ throws IOException {
+ SecretKey tokenSecret = secretManager.retrieveTokenSecret(appid);
+ if (null == tokenSecret) {
+ LOG.info("Request for unknown token " + appid);
+ throw new IOException("could not find jobid");
+ }
+ // string to encrypt
+ String enc_str = SecureShuffleUtils.buildMsgFrom(requestUri);
+ // hash from the fetcher
+ String urlHashStr =
+ request.getHeader(SecureShuffleUtils.HTTP_HEADER_URL_HASH);
+ if (urlHashStr == null) {
+ LOG.info("Missing header hash for " + appid);
+ throw new IOException("fetcher cannot be authenticated");
+ }
+ if (LOG.isDebugEnabled()) {
+ int len = urlHashStr.length();
+ LOG.debug("verifying request. enc_str=" + enc_str + "; hash=..." +
+ urlHashStr.substring(len-len/2, len-1));
+ }
+ // verify - throws exception
+ SecureShuffleUtils.verifyReply(urlHashStr, enc_str, tokenSecret);
+ // verification passed - encode the reply
+ String reply =
+ SecureShuffleUtils.generateHash(urlHashStr.getBytes(Charsets.UTF_8),
+ tokenSecret);
+ response.setHeader(SecureShuffleUtils.HTTP_HEADER_REPLY_URL_HASH, reply);
+ // Put shuffle version into http header
+ response.setHeader(ShuffleHeader.HTTP_HEADER_NAME,
+ ShuffleHeader.DEFAULT_HTTP_HEADER_NAME);
+ response.setHeader(ShuffleHeader.HTTP_HEADER_VERSION,
+ ShuffleHeader.DEFAULT_HTTP_HEADER_VERSION);
+ if (LOG.isDebugEnabled()) {
+ int len = reply.length();
+        LOG.debug("Fetcher request verified. enc_str=" + enc_str + ";reply=" +
+ reply.substring(len-len/2, len-1));
+ }
+ }
+
+ protected ChannelFuture sendMapOutput(ChannelHandlerContext ctx, Channel ch,
+ String user, String mapId, int reduce, MapOutputInfo mapOutputInfo)
+ throws IOException {
+ final TezIndexRecord info = mapOutputInfo.indexRecord;
+ final ShuffleHeader header =
+ new ShuffleHeader(mapId, info.getPartLength(), info.getRawLength(), reduce);
+ final DataOutputBuffer dob = new DataOutputBuffer();
+ header.write(dob);
+ ch.write(wrappedBuffer(dob.getData(), 0, dob.getLength()));
+ final File spillfile =
+ new File(mapOutputInfo.mapOutputFileName.toString());
+ RandomAccessFile spill;
+ try {
+ spill = SecureIOUtils.openForRandomRead(spillfile, "r", user, null);
+ } catch (FileNotFoundException e) {
+ LOG.info(spillfile + " not found");
+ return null;
+ }
+ ChannelFuture writeFuture;
+ if (ch.getPipeline().get(SslHandler.class) == null) {
+ final FadvisedFileRegion partition = new FadvisedFileRegion(spill,
+ info.getStartOffset(), info.getPartLength(), manageOsCache, readaheadLength,
+ readaheadPool, spillfile.getAbsolutePath(),
+ shuffleBufferSize, shuffleTransferToAllowed);
+ writeFuture = ch.write(partition);
+ writeFuture.addListener(new ChannelFutureListener() {
+ // TODO error handling; distinguish IO/connection failures,
+ // attribute to appropriate spill output
+ @Override
+ public void operationComplete(ChannelFuture future) {
+ if (future.isSuccess()) {
+ partition.transferSuccessful();
+ }
+ partition.releaseExternalResources();
+ }
+ });
+ } else {
+ // HTTPS cannot be done with zero copy.
+ final FadvisedChunkedFile chunk = new FadvisedChunkedFile(spill,
+ info.getStartOffset(), info.getPartLength(), sslFileBufferSize,
+ manageOsCache, readaheadLength, readaheadPool,
+ spillfile.getAbsolutePath());
+ writeFuture = ch.write(chunk);
+ }
+ return writeFuture;
+ }
+
+ protected void sendError(ChannelHandlerContext ctx,
+ HttpResponseStatus status) {
+ sendError(ctx, "", status);
+ }
+
+ protected void sendError(ChannelHandlerContext ctx, String message,
+ HttpResponseStatus status) {
+ HttpResponse response = new DefaultHttpResponse(HTTP_1_1, status);
+ response.setHeader(CONTENT_TYPE, "text/plain; charset=UTF-8");
+ // Put shuffle version into http header
+ response.setHeader(ShuffleHeader.HTTP_HEADER_NAME,
+ ShuffleHeader.DEFAULT_HTTP_HEADER_NAME);
+ response.setHeader(ShuffleHeader.HTTP_HEADER_VERSION,
+ ShuffleHeader.DEFAULT_HTTP_HEADER_VERSION);
+ response.setContent(
+ ChannelBuffers.copiedBuffer(message, CharsetUtil.UTF_8));
+
+ // Close the connection as soon as the error message is sent.
+ ctx.getChannel().write(response).addListener(ChannelFutureListener.CLOSE);
+ }
+
+ @Override
+ public void exceptionCaught(ChannelHandlerContext ctx, ExceptionEvent e)
+ throws Exception {
+ Channel ch = e.getChannel();
+ Throwable cause = e.getCause();
+ if (cause instanceof TooLongFrameException) {
+ sendError(ctx, BAD_REQUEST);
+ return;
+ } else if (cause instanceof IOException) {
+ if (cause instanceof ClosedChannelException) {
+ LOG.debug("Ignoring closed channel error", cause);
+ return;
+ }
+ String message = String.valueOf(cause.getMessage());
+ if (IGNORABLE_ERROR_MESSAGE.matcher(message).matches()) {
+ LOG.debug("Ignoring client socket close", cause);
+ return;
+ }
+ }
+
+ LOG.error("Shuffle error: ", cause);
+ if (ch.isConnected()) {
+ LOG.error("Shuffle error " + e);
+ sendError(ctx, INTERNAL_SERVER_ERROR);
+ }
+ }
+ }
+}
diff --git llap-server/src/java/org/apache/tez/dag/app/launcher/DaemonContainerLauncher.java llap-server/src/java/org/apache/tez/dag/app/launcher/DaemonContainerLauncher.java
new file mode 100644
index 0000000..7d41849
--- /dev/null
+++ llap-server/src/java/org/apache/tez/dag/app/launcher/DaemonContainerLauncher.java
@@ -0,0 +1,197 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.tez.dag.app.launcher;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+
+import com.google.common.util.concurrent.FutureCallback;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
+import com.google.common.util.concurrent.ListeningExecutorService;
+import com.google.common.util.concurrent.MoreExecutors;
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
+import com.google.protobuf.ByteString;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.service.AbstractService;
+import org.apache.hadoop.yarn.api.ApplicationConstants;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.util.Clock;
+import org.apache.hadoop.hive.llap.daemon.LlapDaemonConfiguration;
+import org.apache.hadoop.hive.llap.daemon.LlapDaemonProtocolBlockingPB;
+import org.apache.hadoop.hive.llap.daemon.impl.LlapDaemonProtocolClientImpl;
+import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerRequestProto;
+import org.apache.tez.dag.app.AppContext;
+import org.apache.tez.dag.app.TaskAttemptListener;
+import org.apache.tez.dag.app.rm.NMCommunicatorEvent;
+import org.apache.tez.dag.app.rm.NMCommunicatorLaunchRequestEvent;
+import org.apache.tez.dag.app.rm.container.AMContainerEvent;
+import org.apache.tez.dag.app.rm.container.AMContainerEventLaunchFailed;
+import org.apache.tez.dag.app.rm.container.AMContainerEventLaunched;
+import org.apache.tez.dag.app.rm.container.AMContainerEventType;
+import org.apache.tez.dag.history.DAGHistoryEvent;
+import org.apache.tez.dag.history.events.ContainerLaunchedEvent;
+
+public class DaemonContainerLauncher extends AbstractService implements ContainerLauncher {
+
+  // TODO Support interruptibility of tasks which haven't yet been launched.
+
+ // TODO May need multiple connections per target machine, depending upon how synchronization is handled in the RPC layer
+
+ static final Log LOG = LogFactory.getLog(DaemonContainerLauncher.class);
+
+ private final AppContext context;
+ private final ListeningExecutorService executor;
+ private final String tokenIdentifier;
+ private final TaskAttemptListener tal;
+  private final Map<String, LlapDaemonProtocolBlockingPB> proxyMap;
+ private final int servicePort;
+ private final Clock clock;
+
+
+ // Configuration passed in here to set up final parameters
+ public DaemonContainerLauncher(AppContext appContext, Configuration conf,
+ TaskAttemptListener tal) {
+ super(DaemonContainerLauncher.class.getName());
+ this.clock = appContext.getClock();
+ // TODO Scale this based on numDaemons / threads per daemon
+ int numThreads = conf.getInt(LlapDaemonConfiguration.LLAP_DAEMON_AM_COMMUNICATOR_NUM_THREADS,
+ LlapDaemonConfiguration.LLAP_DAEMON_AM_COMMUNICATOR_NUM_THREADS_DEFAULT);
+ this.servicePort = conf.getInt(LlapDaemonConfiguration.LLAP_DAEMON_RPC_PORT,
+ LlapDaemonConfiguration.LLAP_DAEMON_RPC_PORT_DEFAULT);
+ ExecutorService localExecutor = Executors.newFixedThreadPool(numThreads,
+ new ThreadFactoryBuilder().setNameFormat("DaemonCommunicator #%2d").build());
+ executor = MoreExecutors.listeningDecorator(localExecutor);
+ this.context = appContext;
+ this.tokenIdentifier = context.getApplicationID().toString();
+ this.tal = tal;
+    this.proxyMap = new HashMap<String, LlapDaemonProtocolBlockingPB>();
+ }
+
+ public void serviceStop() {
+ executor.shutdownNow();
+ }
+
+ private synchronized LlapDaemonProtocolBlockingPB getProxy(String hostname) {
+ LlapDaemonProtocolBlockingPB proxy = proxyMap.get(hostname);
+ if (proxy == null) {
+ proxy = new LlapDaemonProtocolClientImpl(getConfig(), hostname, servicePort);
+ proxyMap.put(hostname, proxy);
+ }
+ return proxy;
+ }
+
+ @Override
+ public void handle(NMCommunicatorEvent event) {
+ switch (event.getType()) {
+ case CONTAINER_LAUNCH_REQUEST:
+ NMCommunicatorLaunchRequestEvent launchEvent = (NMCommunicatorLaunchRequestEvent) event;
+        ListenableFuture<Void> future = executor.submit(
+ new SubmitCallable(getProxy(launchEvent.getNodeId().getHost()), launchEvent,
+ tokenIdentifier, tal.getAddress().getHostName(), tal.getAddress().getPort()));
+ Futures.addCallback(future, new SubmitCallback(launchEvent.getContainerId(),
+ launchEvent.getContainer().getNodeId().getHost()));
+ break;
+ case CONTAINER_STOP_REQUEST:
+ LOG.info("DEBUG: Ignoring STOP_REQUEST for event: " + event);
+        // TODO Should this send out a container-terminated message? No one tells AMContainer
+        // that the container is actually done (normally received from the RM).
+ // TODO Sending this out for an unlaunched container is invalid
+ context.getEventHandler().handle(new AMContainerEvent(event.getContainerId(),
+ AMContainerEventType.C_NM_STOP_SENT));
+ break;
+ }
+ }
+
+
+  private static class SubmitCallable implements Callable<Void> {
+
+ private final NMCommunicatorLaunchRequestEvent event;
+ private final String tokenIdentifier;
+ private final String amHost;
+ private final int amPort;
+ private final LlapDaemonProtocolBlockingPB daemonProxy;
+
+ private SubmitCallable(LlapDaemonProtocolBlockingPB daemonProxy,
+ NMCommunicatorLaunchRequestEvent event, String tokenIdentifier,
+ String amHost, int amPort) {
+ this.event = event;
+ this.daemonProxy = daemonProxy;
+ this.tokenIdentifier = tokenIdentifier;
+ this.amHost = amHost;
+ this.amPort = amPort;
+ }
+
+
+ @Override
+ public Void call() throws Exception {
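+      // Assemble the launch request for the daemon: the AM address (so the launched work can
+      // reach the TaskAttemptListener), application/attempt/container identifiers, the
+      // serialized credentials, and the submitting user.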
+ RunContainerRequestProto.Builder requestBuilder = RunContainerRequestProto.newBuilder();
+ // Need the taskAttemptListenerAddress
+ requestBuilder.setAmHost(amHost).setAmPort(amPort);
+ requestBuilder.setAppAttemptNumber(event.getContainer().getId().getApplicationAttemptId().getAttemptId());
+ requestBuilder.setApplicationIdString(
+ event.getContainer().getId().getApplicationAttemptId().getApplicationId().toString());
+ requestBuilder.setTokenIdentifier(tokenIdentifier);
+ requestBuilder.setContainerIdString(event.getContainer().getId().toString());
+ requestBuilder.setCredentialsBinary(
+ ByteString.copyFrom(event.getContainerLaunchContext().getTokens()));
+ requestBuilder.setUser(System.getenv(ApplicationConstants.Environment.USER.name()));
+
+ RunContainerRequestProto request = requestBuilder.build();
+ daemonProxy.runContainer(null, request);
+ return null;
+ }
+ }
+
+  private class SubmitCallback implements FutureCallback<Void> {
+
+ private final ContainerId containerId;
+ private final String host;
+
+ private SubmitCallback(ContainerId containerId, String host) {
+ this.containerId = containerId;
+ this.host = host;
+ }
+
+ @Override
+ public void onSuccess(Void result) {
+ LOG.info("Container: " + containerId + " launch succeeded on host: " + host);
+ context.getEventHandler().handle(new AMContainerEventLaunched(containerId));
+ ContainerLaunchedEvent lEvt = new ContainerLaunchedEvent(
+ containerId, clock.getTime(), context.getApplicationAttemptId());
+ context.getHistoryHandler().handle(new DAGHistoryEvent(
+ null, lEvt));
+ }
+
+ @Override
+ public void onFailure(Throwable t) {
+ LOG.error("Failed to launch container: " + containerId + " on host: " + host, t);
+ sendContainerLaunchFailedMsg(containerId, t);
+
+ }
+ }
+
+ @SuppressWarnings("unchecked")
+ void sendContainerLaunchFailedMsg(ContainerId containerId, Throwable t) {
+ context.getEventHandler().handle(new AMContainerEventLaunchFailed(containerId, t == null ? "" : t.getMessage()));
+ }
+
+
+}
diff --git llap-server/src/java/org/apache/tez/dag/app/rm/DaemonTaskSchedulerService.java llap-server/src/java/org/apache/tez/dag/app/rm/DaemonTaskSchedulerService.java
new file mode 100644
index 0000000..8746df0
--- /dev/null
+++ llap-server/src/java/org/apache/tez/dag/app/rm/DaemonTaskSchedulerService.java
@@ -0,0 +1,326 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.tez.dag.app.rm;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Random;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import com.google.common.primitives.Ints;
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ContainerStatus;
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.NodeReport;
+import org.apache.hadoop.yarn.api.records.Priority;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.client.api.AMRMClient;
+import org.apache.hadoop.yarn.client.api.async.AMRMClientAsync;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.hive.llap.daemon.LlapDaemonConfiguration;
+import org.apache.tez.dag.api.TezUncheckedException;
+import org.apache.tez.dag.app.AppContext;
+
+
+// TODO Registration with RM - so that the AM is considered dead and restarted in the expiry interval - 10 minutes.
+
+public class DaemonTaskSchedulerService extends TaskSchedulerService {
+
+ private static final Log LOG = LogFactory.getLog(DaemonTaskSchedulerService.class);
+
+ private final ExecutorService appCallbackExecutor;
+ private final TaskSchedulerAppCallback appClientDelegate;
+ private final AppContext appContext;
+  private final List<String> serviceHosts;
+ private final ContainerFactory containerFactory;
+ private final Random random = new Random();
+
+ private final String clientHostname;
+ private final int clientPort;
+ private final String trackingUrl;
+ private final AtomicBoolean isStopped = new AtomicBoolean(false);
+ private final ConcurrentMap