diff --git llap-server/bin/llap-daemon-env.sh llap-server/bin/llap-daemon-env.sh
index a9d1cff..a2298ad 100755
--- llap-server/bin/llap-daemon-env.sh
+++ llap-server/bin/llap-daemon-env.sh
@@ -1,3 +1,21 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+
+
# Path to the directory which contains hive-exec and hive-llap-server
#export LLAP_DAEMON_HOME=
diff --git llap-server/src/gen/protobuf/gen-java/org/apache/hadoop/hive/llap/daemon/rpc/LlapDaemonProtocolProtos.java llap-server/src/gen/protobuf/gen-java/org/apache/hadoop/hive/llap/daemon/rpc/LlapDaemonProtocolProtos.java
index 66122c1..2caca26 100644
--- llap-server/src/gen/protobuf/gen-java/org/apache/hadoop/hive/llap/daemon/rpc/LlapDaemonProtocolProtos.java
+++ llap-server/src/gen/protobuf/gen-java/org/apache/hadoop/hive/llap/daemon/rpc/LlapDaemonProtocolProtos.java
@@ -8,7 +8,5452 @@ private LlapDaemonProtocolProtos() {}
public static void registerAllExtensions(
com.google.protobuf.ExtensionRegistry registry) {
}
- public interface RunContainerRequestProtoOrBuilder
+ public interface UserPayloadProtoOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // optional bytes user_payload = 1;
+ /**
+ * optional bytes user_payload = 1;
+ */
+ boolean hasUserPayload();
+ /**
+ * optional bytes user_payload = 1;
+ */
+ com.google.protobuf.ByteString getUserPayload();
+
+ // optional int32 version = 2;
+ /**
+ * optional int32 version = 2;
+ */
+ boolean hasVersion();
+ /**
+ * optional int32 version = 2;
+ */
+ int getVersion();
+ }
+ /**
+ * Protobuf type {@code UserPayloadProto}
+ */
+ public static final class UserPayloadProto extends
+ com.google.protobuf.GeneratedMessage
+ implements UserPayloadProtoOrBuilder {
+ // Use UserPayloadProto.newBuilder() to construct.
+ private UserPayloadProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private UserPayloadProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final UserPayloadProto defaultInstance;
+ public static UserPayloadProto getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public UserPayloadProto getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private UserPayloadProto(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 10: {
+ bitField0_ |= 0x00000001;
+ userPayload_ = input.readBytes();
+ break;
+ }
+ case 16: {
+ bitField0_ |= 0x00000002;
+ version_ = input.readInt32();
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_UserPayloadProto_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_UserPayloadProto_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProto.class, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProto.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser<UserPayloadProto> PARSER =
+ new com.google.protobuf.AbstractParser<UserPayloadProto>() {
+ public UserPayloadProto parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new UserPayloadProto(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser<UserPayloadProto> getParserForType() {
+ return PARSER;
+ }
+
+ private int bitField0_;
+ // optional bytes user_payload = 1;
+ public static final int USER_PAYLOAD_FIELD_NUMBER = 1;
+ private com.google.protobuf.ByteString userPayload_;
+ /**
+ * optional bytes user_payload = 1;
+ */
+ public boolean hasUserPayload() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * optional bytes user_payload = 1;
+ */
+ public com.google.protobuf.ByteString getUserPayload() {
+ return userPayload_;
+ }
+
+ // optional int32 version = 2;
+ public static final int VERSION_FIELD_NUMBER = 2;
+ private int version_;
+ /**
+ * optional int32 version = 2;
+ */
+ public boolean hasVersion() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * optional int32 version = 2;
+ */
+ public int getVersion() {
+ return version_;
+ }
+
+ private void initFields() {
+ userPayload_ = com.google.protobuf.ByteString.EMPTY;
+ version_ = 0;
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeBytes(1, userPayload_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeInt32(2, version_);
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(1, userPayload_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeInt32Size(2, version_);
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProto)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProto other = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProto) obj;
+
+ boolean result = true;
+ result = result && (hasUserPayload() == other.hasUserPayload());
+ if (hasUserPayload()) {
+ result = result && getUserPayload()
+ .equals(other.getUserPayload());
+ }
+ result = result && (hasVersion() == other.hasVersion());
+ if (hasVersion()) {
+ result = result && (getVersion()
+ == other.getVersion());
+ }
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ private int memoizedHashCode = 0;
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (hasUserPayload()) {
+ hash = (37 * hash) + USER_PAYLOAD_FIELD_NUMBER;
+ hash = (53 * hash) + getUserPayload().hashCode();
+ }
+ if (hasVersion()) {
+ hash = (37 * hash) + VERSION_FIELD_NUMBER;
+ hash = (53 * hash) + getVersion();
+ }
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProto parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProto parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProto parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProto parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProto parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProto parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProto parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProto parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProto parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProto parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProto prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code UserPayloadProto}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProtoOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_UserPayloadProto_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_UserPayloadProto_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProto.class, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProto.Builder.class);
+ }
+
+ // Construct using org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProto.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ userPayload_ = com.google.protobuf.ByteString.EMPTY;
+ bitField0_ = (bitField0_ & ~0x00000001);
+ version_ = 0;
+ bitField0_ = (bitField0_ & ~0x00000002);
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_UserPayloadProto_descriptor;
+ }
+
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProto getDefaultInstanceForType() {
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProto.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProto build() {
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProto result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProto buildPartial() {
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProto result = new org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProto(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ result.userPayload_ = userPayload_;
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+ to_bitField0_ |= 0x00000002;
+ }
+ result.version_ = version_;
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProto) {
+ return mergeFrom((org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProto)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProto other) {
+ if (other == org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProto.getDefaultInstance()) return this;
+ if (other.hasUserPayload()) {
+ setUserPayload(other.getUserPayload());
+ }
+ if (other.hasVersion()) {
+ setVersion(other.getVersion());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProto parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProto) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ private int bitField0_;
+
+ // optional bytes user_payload = 1;
+ private com.google.protobuf.ByteString userPayload_ = com.google.protobuf.ByteString.EMPTY;
+ /**
+ * optional bytes user_payload = 1;
+ */
+ public boolean hasUserPayload() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * optional bytes user_payload = 1;
+ */
+ public com.google.protobuf.ByteString getUserPayload() {
+ return userPayload_;
+ }
+ /**
+ * optional bytes user_payload = 1;
+ */
+ public Builder setUserPayload(com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ userPayload_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * optional bytes user_payload = 1;
+ */
+ public Builder clearUserPayload() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ userPayload_ = getDefaultInstance().getUserPayload();
+ onChanged();
+ return this;
+ }
+
+ // optional int32 version = 2;
+ private int version_ ;
+ /**
+ * optional int32 version = 2;
+ */
+ public boolean hasVersion() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * optional int32 version = 2;
+ */
+ public int getVersion() {
+ return version_;
+ }
+ /**
+ * optional int32 version = 2;
+ */
+ public Builder setVersion(int value) {
+ bitField0_ |= 0x00000002;
+ version_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * optional int32 version = 2;
+ */
+ public Builder clearVersion() {
+ bitField0_ = (bitField0_ & ~0x00000002);
+ version_ = 0;
+ onChanged();
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:UserPayloadProto)
+ }
+
+ static {
+ defaultInstance = new UserPayloadProto(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:UserPayloadProto)
+ }
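[Editorial aside, not part of the patch: a minimal usage sketch of the UserPayloadProto message generated above, assuming the generated class is on the classpath. The payload bytes and version value are illustrative placeholders.]

    import com.google.protobuf.ByteString;
    import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProto;

    public class UserPayloadProtoExample {
      public static void main(String[] args) throws Exception {
        // Build a message with the generated builder API.
        UserPayloadProto payload = UserPayloadProto.newBuilder()
            .setUserPayload(ByteString.copyFromUtf8("example-bytes")) // user_payload = 1 (illustrative)
            .setVersion(1)                                            // version = 2 (illustrative)
            .build();

        // Round-trip through the protobuf wire format.
        byte[] wire = payload.toByteArray();
        UserPayloadProto parsed = UserPayloadProto.parseFrom(wire);
        System.out.println(parsed.hasVersion() + " " + parsed.getVersion());
      }
    }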
+
+ public interface EntityDescriptorProtoOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // optional string class_name = 1;
+ /**
+ * optional string class_name = 1;
+ */
+ boolean hasClassName();
+ /**
+ * optional string class_name = 1;
+ */
+ java.lang.String getClassName();
+ /**
+ * optional string class_name = 1;
+ */
+ com.google.protobuf.ByteString
+ getClassNameBytes();
+
+ // optional .UserPayloadProto user_payload = 2;
+ /**
+ * optional .UserPayloadProto user_payload = 2;
+ */
+ boolean hasUserPayload();
+ /**
+ * optional .UserPayloadProto user_payload = 2;
+ */
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProto getUserPayload();
+ /**
+ * optional .UserPayloadProto user_payload = 2;
+ */
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProtoOrBuilder getUserPayloadOrBuilder();
+
+ // optional bytes history_text = 3;
+ /**
+ * optional bytes history_text = 3;
+ */
+ boolean hasHistoryText();
+ /**
+ * optional bytes history_text = 3;
+ */
+ com.google.protobuf.ByteString getHistoryText();
+ }
+ /**
+ * Protobuf type {@code EntityDescriptorProto}
+ */
+ public static final class EntityDescriptorProto extends
+ com.google.protobuf.GeneratedMessage
+ implements EntityDescriptorProtoOrBuilder {
+ // Use EntityDescriptorProto.newBuilder() to construct.
+ private EntityDescriptorProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private EntityDescriptorProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final EntityDescriptorProto defaultInstance;
+ public static EntityDescriptorProto getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public EntityDescriptorProto getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private EntityDescriptorProto(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 10: {
+ bitField0_ |= 0x00000001;
+ className_ = input.readBytes();
+ break;
+ }
+ case 18: {
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProto.Builder subBuilder = null;
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ subBuilder = userPayload_.toBuilder();
+ }
+ userPayload_ = input.readMessage(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProto.PARSER, extensionRegistry);
+ if (subBuilder != null) {
+ subBuilder.mergeFrom(userPayload_);
+ userPayload_ = subBuilder.buildPartial();
+ }
+ bitField0_ |= 0x00000002;
+ break;
+ }
+ case 26: {
+ bitField0_ |= 0x00000004;
+ historyText_ = input.readBytes();
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_EntityDescriptorProto_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_EntityDescriptorProto_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.class, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser<EntityDescriptorProto> PARSER =
+ new com.google.protobuf.AbstractParser<EntityDescriptorProto>() {
+ public EntityDescriptorProto parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new EntityDescriptorProto(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser<EntityDescriptorProto> getParserForType() {
+ return PARSER;
+ }
+
+ private int bitField0_;
+ // optional string class_name = 1;
+ public static final int CLASS_NAME_FIELD_NUMBER = 1;
+ private java.lang.Object className_;
+ /**
+ * optional string class_name = 1;
+ */
+ public boolean hasClassName() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * optional string class_name = 1;
+ */
+ public java.lang.String getClassName() {
+ java.lang.Object ref = className_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
+ className_ = s;
+ }
+ return s;
+ }
+ }
+ /**
+ * optional string class_name = 1;
+ */
+ public com.google.protobuf.ByteString
+ getClassNameBytes() {
+ java.lang.Object ref = className_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ className_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ // optional .UserPayloadProto user_payload = 2;
+ public static final int USER_PAYLOAD_FIELD_NUMBER = 2;
+ private org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProto userPayload_;
+ /**
+ * optional .UserPayloadProto user_payload = 2;
+ */
+ public boolean hasUserPayload() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * optional .UserPayloadProto user_payload = 2;
+ */
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProto getUserPayload() {
+ return userPayload_;
+ }
+ /**
+ * optional .UserPayloadProto user_payload = 2;
+ */
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProtoOrBuilder getUserPayloadOrBuilder() {
+ return userPayload_;
+ }
+
+ // optional bytes history_text = 3;
+ public static final int HISTORY_TEXT_FIELD_NUMBER = 3;
+ private com.google.protobuf.ByteString historyText_;
+ /**
+ * optional bytes history_text = 3;
+ */
+ public boolean hasHistoryText() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ /**
+ * optional bytes history_text = 3;
+ */
+ public com.google.protobuf.ByteString getHistoryText() {
+ return historyText_;
+ }
+
+ private void initFields() {
+ className_ = "";
+ userPayload_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProto.getDefaultInstance();
+ historyText_ = com.google.protobuf.ByteString.EMPTY;
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeBytes(1, getClassNameBytes());
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeMessage(2, userPayload_);
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ output.writeBytes(3, historyText_);
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(1, getClassNameBytes());
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(2, userPayload_);
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(3, historyText_);
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto other = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto) obj;
+
+ boolean result = true;
+ result = result && (hasClassName() == other.hasClassName());
+ if (hasClassName()) {
+ result = result && getClassName()
+ .equals(other.getClassName());
+ }
+ result = result && (hasUserPayload() == other.hasUserPayload());
+ if (hasUserPayload()) {
+ result = result && getUserPayload()
+ .equals(other.getUserPayload());
+ }
+ result = result && (hasHistoryText() == other.hasHistoryText());
+ if (hasHistoryText()) {
+ result = result && getHistoryText()
+ .equals(other.getHistoryText());
+ }
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ private int memoizedHashCode = 0;
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (hasClassName()) {
+ hash = (37 * hash) + CLASS_NAME_FIELD_NUMBER;
+ hash = (53 * hash) + getClassName().hashCode();
+ }
+ if (hasUserPayload()) {
+ hash = (37 * hash) + USER_PAYLOAD_FIELD_NUMBER;
+ hash = (53 * hash) + getUserPayload().hashCode();
+ }
+ if (hasHistoryText()) {
+ hash = (37 * hash) + HISTORY_TEXT_FIELD_NUMBER;
+ hash = (53 * hash) + getHistoryText().hashCode();
+ }
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code EntityDescriptorProto}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProtoOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_EntityDescriptorProto_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_EntityDescriptorProto_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.class, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.Builder.class);
+ }
+
+ // Construct using org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ getUserPayloadFieldBuilder();
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ className_ = "";
+ bitField0_ = (bitField0_ & ~0x00000001);
+ if (userPayloadBuilder_ == null) {
+ userPayload_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProto.getDefaultInstance();
+ } else {
+ userPayloadBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000002);
+ historyText_ = com.google.protobuf.ByteString.EMPTY;
+ bitField0_ = (bitField0_ & ~0x00000004);
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_EntityDescriptorProto_descriptor;
+ }
+
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto getDefaultInstanceForType() {
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto build() {
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto buildPartial() {
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto result = new org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ result.className_ = className_;
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+ to_bitField0_ |= 0x00000002;
+ }
+ if (userPayloadBuilder_ == null) {
+ result.userPayload_ = userPayload_;
+ } else {
+ result.userPayload_ = userPayloadBuilder_.build();
+ }
+ if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
+ to_bitField0_ |= 0x00000004;
+ }
+ result.historyText_ = historyText_;
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto) {
+ return mergeFrom((org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto other) {
+ if (other == org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.getDefaultInstance()) return this;
+ if (other.hasClassName()) {
+ bitField0_ |= 0x00000001;
+ className_ = other.className_;
+ onChanged();
+ }
+ if (other.hasUserPayload()) {
+ mergeUserPayload(other.getUserPayload());
+ }
+ if (other.hasHistoryText()) {
+ setHistoryText(other.getHistoryText());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ private int bitField0_;
+
+ // optional string class_name = 1;
+ private java.lang.Object className_ = "";
+ /**
+ * optional string class_name = 1;
+ */
+ public boolean hasClassName() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * optional string class_name = 1;
+ */
+ public java.lang.String getClassName() {
+ java.lang.Object ref = className_;
+ if (!(ref instanceof java.lang.String)) {
+ java.lang.String s = ((com.google.protobuf.ByteString) ref)
+ .toStringUtf8();
+ className_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * optional string class_name = 1;
+ */
+ public com.google.protobuf.ByteString
+ getClassNameBytes() {
+ java.lang.Object ref = className_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ className_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * optional string class_name = 1;
+ */
+ public Builder setClassName(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ className_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * optional string class_name = 1;
+ */
+ public Builder clearClassName() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ className_ = getDefaultInstance().getClassName();
+ onChanged();
+ return this;
+ }
+ /**
+ * optional string class_name = 1;
+ */
+ public Builder setClassNameBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ className_ = value;
+ onChanged();
+ return this;
+ }
+
+ // optional .UserPayloadProto user_payload = 2;
+ private org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProto userPayload_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProto.getDefaultInstance();
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProto, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProto.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProtoOrBuilder> userPayloadBuilder_;
+ /**
+ * optional .UserPayloadProto user_payload = 2;
+ */
+ public boolean hasUserPayload() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * optional .UserPayloadProto user_payload = 2;
+ */
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProto getUserPayload() {
+ if (userPayloadBuilder_ == null) {
+ return userPayload_;
+ } else {
+ return userPayloadBuilder_.getMessage();
+ }
+ }
+ /**
+ * optional .UserPayloadProto user_payload = 2;
+ */
+ public Builder setUserPayload(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProto value) {
+ if (userPayloadBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ userPayload_ = value;
+ onChanged();
+ } else {
+ userPayloadBuilder_.setMessage(value);
+ }
+ bitField0_ |= 0x00000002;
+ return this;
+ }
+ /**
+ * optional .UserPayloadProto user_payload = 2;
+ */
+ public Builder setUserPayload(
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProto.Builder builderForValue) {
+ if (userPayloadBuilder_ == null) {
+ userPayload_ = builderForValue.build();
+ onChanged();
+ } else {
+ userPayloadBuilder_.setMessage(builderForValue.build());
+ }
+ bitField0_ |= 0x00000002;
+ return this;
+ }
+ /**
+ * optional .UserPayloadProto user_payload = 2;
+ */
+ public Builder mergeUserPayload(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProto value) {
+ if (userPayloadBuilder_ == null) {
+ if (((bitField0_ & 0x00000002) == 0x00000002) &&
+ userPayload_ != org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProto.getDefaultInstance()) {
+ userPayload_ =
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProto.newBuilder(userPayload_).mergeFrom(value).buildPartial();
+ } else {
+ userPayload_ = value;
+ }
+ onChanged();
+ } else {
+ userPayloadBuilder_.mergeFrom(value);
+ }
+ bitField0_ |= 0x00000002;
+ return this;
+ }
+ /**
+ * optional .UserPayloadProto user_payload = 2;
+ */
+ public Builder clearUserPayload() {
+ if (userPayloadBuilder_ == null) {
+ userPayload_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProto.getDefaultInstance();
+ onChanged();
+ } else {
+ userPayloadBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000002);
+ return this;
+ }
+ /**
+ * optional .UserPayloadProto user_payload = 2;
+ */
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProto.Builder getUserPayloadBuilder() {
+ bitField0_ |= 0x00000002;
+ onChanged();
+ return getUserPayloadFieldBuilder().getBuilder();
+ }
+ /**
+ * optional .UserPayloadProto user_payload = 2;
+ */
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProtoOrBuilder getUserPayloadOrBuilder() {
+ if (userPayloadBuilder_ != null) {
+ return userPayloadBuilder_.getMessageOrBuilder();
+ } else {
+ return userPayload_;
+ }
+ }
+ /**
+ * optional .UserPayloadProto user_payload = 2;
+ */
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProto, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProto.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProtoOrBuilder>
+ getUserPayloadFieldBuilder() {
+ if (userPayloadBuilder_ == null) {
+ userPayloadBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProto, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProto.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProtoOrBuilder>(
+ userPayload_,
+ getParentForChildren(),
+ isClean());
+ userPayload_ = null;
+ }
+ return userPayloadBuilder_;
+ }
+
+ // optional bytes history_text = 3;
+ private com.google.protobuf.ByteString historyText_ = com.google.protobuf.ByteString.EMPTY;
+ /**
+ * optional bytes history_text = 3;
+ */
+ public boolean hasHistoryText() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ /**
+ * optional bytes history_text = 3;
+ */
+ public com.google.protobuf.ByteString getHistoryText() {
+ return historyText_;
+ }
+ /**
+ * optional bytes history_text = 3;
+ */
+ public Builder setHistoryText(com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000004;
+ historyText_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * optional bytes history_text = 3;
+ */
+ public Builder clearHistoryText() {
+ bitField0_ = (bitField0_ & ~0x00000004);
+ historyText_ = getDefaultInstance().getHistoryText();
+ onChanged();
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:EntityDescriptorProto)
+ }
+
+ static {
+ defaultInstance = new EntityDescriptorProto(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:EntityDescriptorProto)
+ }
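[Editorial aside, not part of the patch: a hedged sketch of composing an EntityDescriptorProto with a nested UserPayloadProto through the builder methods generated above. The class-name string and payload contents are made-up placeholders.]

    import com.google.protobuf.ByteString;
    import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto;
    import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProto;

    public class EntityDescriptorProtoExample {
      public static void main(String[] args) throws Exception {
        EntityDescriptorProto descriptor = EntityDescriptorProto.newBuilder()
            .setClassName("org.example.SomeProcessor")           // class_name = 1 (placeholder)
            .setUserPayload(UserPayloadProto.newBuilder()         // builder overload of setUserPayload
                .setUserPayload(ByteString.copyFromUtf8("conf"))
                .setVersion(1))
            .setHistoryText(ByteString.copyFromUtf8("history"))   // history_text = 3 (placeholder)
            .build();

        // Serialize and parse back to confirm the nested message survives the round trip.
        EntityDescriptorProto parsed =
            EntityDescriptorProto.parseFrom(descriptor.toByteArray());
        System.out.println(parsed.getUserPayload().getVersion());
      }
    }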
+
+ public interface IOSpecProtoOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // optional string connected_vertex_name = 1;
+ /**
+ * optional string connected_vertex_name = 1;
+ */
+ boolean hasConnectedVertexName();
+ /**
+ * optional string connected_vertex_name = 1;
+ */
+ java.lang.String getConnectedVertexName();
+ /**
+ * optional string connected_vertex_name = 1;
+ */
+ com.google.protobuf.ByteString
+ getConnectedVertexNameBytes();
+
+ // optional .EntityDescriptorProto io_descriptor = 2;
+ /**
+ * optional .EntityDescriptorProto io_descriptor = 2;
+ */
+ boolean hasIoDescriptor();
+ /**
+ * optional .EntityDescriptorProto io_descriptor = 2;
+ */
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto getIoDescriptor();
+ /**
+ * optional .EntityDescriptorProto io_descriptor = 2;
+ */
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProtoOrBuilder getIoDescriptorOrBuilder();
+
+ // optional int32 physical_edge_count = 3;
+ /**
+ * optional int32 physical_edge_count = 3;
+ */
+ boolean hasPhysicalEdgeCount();
+ /**
+ * optional int32 physical_edge_count = 3;
+ */
+ int getPhysicalEdgeCount();
+ }
+ /**
+ * Protobuf type {@code IOSpecProto}
+ */
+ public static final class IOSpecProto extends
+ com.google.protobuf.GeneratedMessage
+ implements IOSpecProtoOrBuilder {
+ // Use IOSpecProto.newBuilder() to construct.
+ private IOSpecProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private IOSpecProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final IOSpecProto defaultInstance;
+ public static IOSpecProto getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public IOSpecProto getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private IOSpecProto(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 10: {
+ bitField0_ |= 0x00000001;
+ connectedVertexName_ = input.readBytes();
+ break;
+ }
+ case 18: {
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.Builder subBuilder = null;
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ subBuilder = ioDescriptor_.toBuilder();
+ }
+ ioDescriptor_ = input.readMessage(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.PARSER, extensionRegistry);
+ if (subBuilder != null) {
+ subBuilder.mergeFrom(ioDescriptor_);
+ ioDescriptor_ = subBuilder.buildPartial();
+ }
+ bitField0_ |= 0x00000002;
+ break;
+ }
+ case 24: {
+ bitField0_ |= 0x00000004;
+ physicalEdgeCount_ = input.readInt32();
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_IOSpecProto_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_IOSpecProto_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto.class, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser<IOSpecProto> PARSER =
+ new com.google.protobuf.AbstractParser<IOSpecProto>() {
+ public IOSpecProto parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new IOSpecProto(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser<IOSpecProto> getParserForType() {
+ return PARSER;
+ }
+
+ private int bitField0_;
+ // optional string connected_vertex_name = 1;
+ public static final int CONNECTED_VERTEX_NAME_FIELD_NUMBER = 1;
+ private java.lang.Object connectedVertexName_;
+ /**
+ * optional string connected_vertex_name = 1;
+ */
+ public boolean hasConnectedVertexName() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * optional string connected_vertex_name = 1;
+ */
+ public java.lang.String getConnectedVertexName() {
+ java.lang.Object ref = connectedVertexName_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
+ connectedVertexName_ = s;
+ }
+ return s;
+ }
+ }
+ /**
+ * optional string connected_vertex_name = 1;
+ */
+ public com.google.protobuf.ByteString
+ getConnectedVertexNameBytes() {
+ java.lang.Object ref = connectedVertexName_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ connectedVertexName_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ // optional .EntityDescriptorProto io_descriptor = 2;
+ public static final int IO_DESCRIPTOR_FIELD_NUMBER = 2;
+ private org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto ioDescriptor_;
+ /**
+ * optional .EntityDescriptorProto io_descriptor = 2;
+ */
+ public boolean hasIoDescriptor() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * optional .EntityDescriptorProto io_descriptor = 2;
+ */
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto getIoDescriptor() {
+ return ioDescriptor_;
+ }
+ /**
+ * optional .EntityDescriptorProto io_descriptor = 2;
+ */
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProtoOrBuilder getIoDescriptorOrBuilder() {
+ return ioDescriptor_;
+ }
+
+ // optional int32 physical_edge_count = 3;
+ public static final int PHYSICAL_EDGE_COUNT_FIELD_NUMBER = 3;
+ private int physicalEdgeCount_;
+ /**
+ * optional int32 physical_edge_count = 3;
+ */
+ public boolean hasPhysicalEdgeCount() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ /**
+ * optional int32 physical_edge_count = 3;
+ */
+ public int getPhysicalEdgeCount() {
+ return physicalEdgeCount_;
+ }
+
+ private void initFields() {
+ connectedVertexName_ = "";
+ ioDescriptor_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.getDefaultInstance();
+ physicalEdgeCount_ = 0;
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeBytes(1, getConnectedVertexNameBytes());
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeMessage(2, ioDescriptor_);
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ output.writeInt32(3, physicalEdgeCount_);
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(1, getConnectedVertexNameBytes());
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(2, ioDescriptor_);
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeInt32Size(3, physicalEdgeCount_);
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto other = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto) obj;
+
+ boolean result = true;
+ result = result && (hasConnectedVertexName() == other.hasConnectedVertexName());
+ if (hasConnectedVertexName()) {
+ result = result && getConnectedVertexName()
+ .equals(other.getConnectedVertexName());
+ }
+ result = result && (hasIoDescriptor() == other.hasIoDescriptor());
+ if (hasIoDescriptor()) {
+ result = result && getIoDescriptor()
+ .equals(other.getIoDescriptor());
+ }
+ result = result && (hasPhysicalEdgeCount() == other.hasPhysicalEdgeCount());
+ if (hasPhysicalEdgeCount()) {
+ result = result && (getPhysicalEdgeCount()
+ == other.getPhysicalEdgeCount());
+ }
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ private int memoizedHashCode = 0;
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (hasConnectedVertexName()) {
+ hash = (37 * hash) + CONNECTED_VERTEX_NAME_FIELD_NUMBER;
+ hash = (53 * hash) + getConnectedVertexName().hashCode();
+ }
+ if (hasIoDescriptor()) {
+ hash = (37 * hash) + IO_DESCRIPTOR_FIELD_NUMBER;
+ hash = (53 * hash) + getIoDescriptor().hashCode();
+ }
+ if (hasPhysicalEdgeCount()) {
+ hash = (37 * hash) + PHYSICAL_EDGE_COUNT_FIELD_NUMBER;
+ hash = (53 * hash) + getPhysicalEdgeCount();
+ }
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code IOSpecProto}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProtoOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_IOSpecProto_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_IOSpecProto_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto.class, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto.Builder.class);
+ }
+
+ // Construct using org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ getIoDescriptorFieldBuilder();
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ connectedVertexName_ = "";
+ bitField0_ = (bitField0_ & ~0x00000001);
+ if (ioDescriptorBuilder_ == null) {
+ ioDescriptor_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.getDefaultInstance();
+ } else {
+ ioDescriptorBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000002);
+ physicalEdgeCount_ = 0;
+ bitField0_ = (bitField0_ & ~0x00000004);
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_IOSpecProto_descriptor;
+ }
+
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto getDefaultInstanceForType() {
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto build() {
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto buildPartial() {
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto result = new org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ result.connectedVertexName_ = connectedVertexName_;
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+ to_bitField0_ |= 0x00000002;
+ }
+ if (ioDescriptorBuilder_ == null) {
+ result.ioDescriptor_ = ioDescriptor_;
+ } else {
+ result.ioDescriptor_ = ioDescriptorBuilder_.build();
+ }
+ if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
+ to_bitField0_ |= 0x00000004;
+ }
+ result.physicalEdgeCount_ = physicalEdgeCount_;
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto) {
+ return mergeFrom((org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto other) {
+ if (other == org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto.getDefaultInstance()) return this;
+ if (other.hasConnectedVertexName()) {
+ bitField0_ |= 0x00000001;
+ connectedVertexName_ = other.connectedVertexName_;
+ onChanged();
+ }
+ if (other.hasIoDescriptor()) {
+ mergeIoDescriptor(other.getIoDescriptor());
+ }
+ if (other.hasPhysicalEdgeCount()) {
+ setPhysicalEdgeCount(other.getPhysicalEdgeCount());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ private int bitField0_;
+
+ // optional string connected_vertex_name = 1;
+ private java.lang.Object connectedVertexName_ = "";
+ /**
+ * optional string connected_vertex_name = 1;
+ */
+ public boolean hasConnectedVertexName() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * optional string connected_vertex_name = 1;
+ */
+ public java.lang.String getConnectedVertexName() {
+ java.lang.Object ref = connectedVertexName_;
+ if (!(ref instanceof java.lang.String)) {
+ java.lang.String s = ((com.google.protobuf.ByteString) ref)
+ .toStringUtf8();
+ connectedVertexName_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * optional string connected_vertex_name = 1;
+ */
+ public com.google.protobuf.ByteString
+ getConnectedVertexNameBytes() {
+ java.lang.Object ref = connectedVertexName_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ connectedVertexName_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * optional string connected_vertex_name = 1;
+ */
+ public Builder setConnectedVertexName(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ connectedVertexName_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * optional string connected_vertex_name = 1;
+ */
+ public Builder clearConnectedVertexName() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ connectedVertexName_ = getDefaultInstance().getConnectedVertexName();
+ onChanged();
+ return this;
+ }
+ /**
+ * optional string connected_vertex_name = 1;
+ */
+ public Builder setConnectedVertexNameBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ connectedVertexName_ = value;
+ onChanged();
+ return this;
+ }
+
+ // optional .EntityDescriptorProto io_descriptor = 2;
+ private org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto ioDescriptor_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.getDefaultInstance();
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProtoOrBuilder> ioDescriptorBuilder_;
+ /**
+ * optional .EntityDescriptorProto io_descriptor = 2;
+ */
+ public boolean hasIoDescriptor() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * optional .EntityDescriptorProto io_descriptor = 2;
+ */
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto getIoDescriptor() {
+ if (ioDescriptorBuilder_ == null) {
+ return ioDescriptor_;
+ } else {
+ return ioDescriptorBuilder_.getMessage();
+ }
+ }
+ /**
+ * optional .EntityDescriptorProto io_descriptor = 2;
+ */
+ public Builder setIoDescriptor(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto value) {
+ if (ioDescriptorBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ioDescriptor_ = value;
+ onChanged();
+ } else {
+ ioDescriptorBuilder_.setMessage(value);
+ }
+ bitField0_ |= 0x00000002;
+ return this;
+ }
+ /**
+ * optional .EntityDescriptorProto io_descriptor = 2;
+ */
+ public Builder setIoDescriptor(
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.Builder builderForValue) {
+ if (ioDescriptorBuilder_ == null) {
+ ioDescriptor_ = builderForValue.build();
+ onChanged();
+ } else {
+ ioDescriptorBuilder_.setMessage(builderForValue.build());
+ }
+ bitField0_ |= 0x00000002;
+ return this;
+ }
+ /**
+ * optional .EntityDescriptorProto io_descriptor = 2;
+ */
+ public Builder mergeIoDescriptor(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto value) {
+ if (ioDescriptorBuilder_ == null) {
+ if (((bitField0_ & 0x00000002) == 0x00000002) &&
+ ioDescriptor_ != org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.getDefaultInstance()) {
+ ioDescriptor_ =
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.newBuilder(ioDescriptor_).mergeFrom(value).buildPartial();
+ } else {
+ ioDescriptor_ = value;
+ }
+ onChanged();
+ } else {
+ ioDescriptorBuilder_.mergeFrom(value);
+ }
+ bitField0_ |= 0x00000002;
+ return this;
+ }
+ /**
+ * optional .EntityDescriptorProto io_descriptor = 2;
+ */
+ public Builder clearIoDescriptor() {
+ if (ioDescriptorBuilder_ == null) {
+ ioDescriptor_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.getDefaultInstance();
+ onChanged();
+ } else {
+ ioDescriptorBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000002);
+ return this;
+ }
+ /**
+ * optional .EntityDescriptorProto io_descriptor = 2;
+ */
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.Builder getIoDescriptorBuilder() {
+ bitField0_ |= 0x00000002;
+ onChanged();
+ return getIoDescriptorFieldBuilder().getBuilder();
+ }
+ /**
+ * optional .EntityDescriptorProto io_descriptor = 2;
+ */
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProtoOrBuilder getIoDescriptorOrBuilder() {
+ if (ioDescriptorBuilder_ != null) {
+ return ioDescriptorBuilder_.getMessageOrBuilder();
+ } else {
+ return ioDescriptor_;
+ }
+ }
+ /**
+ * optional .EntityDescriptorProto io_descriptor = 2;
+ */
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProtoOrBuilder>
+ getIoDescriptorFieldBuilder() {
+ if (ioDescriptorBuilder_ == null) {
+ ioDescriptorBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProtoOrBuilder>(
+ ioDescriptor_,
+ getParentForChildren(),
+ isClean());
+ ioDescriptor_ = null;
+ }
+ return ioDescriptorBuilder_;
+ }
+
+ // optional int32 physical_edge_count = 3;
+ private int physicalEdgeCount_ ;
+ /**
+ * optional int32 physical_edge_count = 3;
+ */
+ public boolean hasPhysicalEdgeCount() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ /**
+ * optional int32 physical_edge_count = 3;
+ */
+ public int getPhysicalEdgeCount() {
+ return physicalEdgeCount_;
+ }
+ /**
+ * optional int32 physical_edge_count = 3;
+ */
+ public Builder setPhysicalEdgeCount(int value) {
+ bitField0_ |= 0x00000004;
+ physicalEdgeCount_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * optional int32 physical_edge_count = 3;
+ */
+ public Builder clearPhysicalEdgeCount() {
+ bitField0_ = (bitField0_ & ~0x00000004);
+ physicalEdgeCount_ = 0;
+ onChanged();
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:IOSpecProto)
+ }
+
+ static {
+ defaultInstance = new IOSpecProto(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:IOSpecProto)
+ }
+
+ public interface GroupInputSpecProtoOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // optional string group_name = 1;
+ /**
+ * optional string group_name = 1;
+ */
+ boolean hasGroupName();
+ /**
+ * optional string group_name = 1;
+ */
+ java.lang.String getGroupName();
+ /**
+ * optional string group_name = 1;
+ */
+ com.google.protobuf.ByteString
+ getGroupNameBytes();
+
+ // repeated string group_vertices = 2;
+ /**
+ * repeated string group_vertices = 2;
+ */
+ java.util.List<java.lang.String>
+ getGroupVerticesList();
+ /**
+ * repeated string group_vertices = 2;
+ */
+ int getGroupVerticesCount();
+ /**
+ * repeated string group_vertices = 2;
+ */
+ java.lang.String getGroupVertices(int index);
+ /**
+ * repeated string group_vertices = 2;
+ */
+ com.google.protobuf.ByteString
+ getGroupVerticesBytes(int index);
+
+ // optional .EntityDescriptorProto merged_input_descriptor = 3;
+ /**
+ * optional .EntityDescriptorProto merged_input_descriptor = 3;
+ */
+ boolean hasMergedInputDescriptor();
+ /**
+ * optional .EntityDescriptorProto merged_input_descriptor = 3;
+ */
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto getMergedInputDescriptor();
+ /**
+ * optional .EntityDescriptorProto merged_input_descriptor = 3;
+ */
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProtoOrBuilder getMergedInputDescriptorOrBuilder();
+ }
+ /**
+ * Protobuf type {@code GroupInputSpecProto}
+ */
+ public static final class GroupInputSpecProto extends
+ com.google.protobuf.GeneratedMessage
+ implements GroupInputSpecProtoOrBuilder {
+ // Use GroupInputSpecProto.newBuilder() to construct.
+ private GroupInputSpecProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private GroupInputSpecProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final GroupInputSpecProto defaultInstance;
+ public static GroupInputSpecProto getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public GroupInputSpecProto getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private GroupInputSpecProto(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 10: {
+ bitField0_ |= 0x00000001;
+ groupName_ = input.readBytes();
+ break;
+ }
+ case 18: {
+ if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) {
+ groupVertices_ = new com.google.protobuf.LazyStringArrayList();
+ mutable_bitField0_ |= 0x00000002;
+ }
+ groupVertices_.add(input.readBytes());
+ break;
+ }
+ case 26: {
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.Builder subBuilder = null;
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ subBuilder = mergedInputDescriptor_.toBuilder();
+ }
+ mergedInputDescriptor_ = input.readMessage(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.PARSER, extensionRegistry);
+ if (subBuilder != null) {
+ subBuilder.mergeFrom(mergedInputDescriptor_);
+ mergedInputDescriptor_ = subBuilder.buildPartial();
+ }
+ bitField0_ |= 0x00000002;
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) {
+ groupVertices_ = new com.google.protobuf.UnmodifiableLazyStringList(groupVertices_);
+ }
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_GroupInputSpecProto_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_GroupInputSpecProto_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto.class, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser<GroupInputSpecProto> PARSER =
+ new com.google.protobuf.AbstractParser<GroupInputSpecProto>() {
+ public GroupInputSpecProto parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new GroupInputSpecProto(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser<GroupInputSpecProto> getParserForType() {
+ return PARSER;
+ }
+
+ private int bitField0_;
+ // optional string group_name = 1;
+ public static final int GROUP_NAME_FIELD_NUMBER = 1;
+ private java.lang.Object groupName_;
+ /**
+ * optional string group_name = 1;
+ */
+ public boolean hasGroupName() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * optional string group_name = 1;
+ */
+ public java.lang.String getGroupName() {
+ java.lang.Object ref = groupName_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
+ groupName_ = s;
+ }
+ return s;
+ }
+ }
+ /**
+ * optional string group_name = 1;
+ */
+ public com.google.protobuf.ByteString
+ getGroupNameBytes() {
+ java.lang.Object ref = groupName_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ groupName_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ // repeated string group_vertices = 2;
+ public static final int GROUP_VERTICES_FIELD_NUMBER = 2;
+ private com.google.protobuf.LazyStringList groupVertices_;
+ /**
+ * repeated string group_vertices = 2;
+ */
+ public java.util.List<java.lang.String>
+ getGroupVerticesList() {
+ return groupVertices_;
+ }
+ /**
+ * repeated string group_vertices = 2;
+ */
+ public int getGroupVerticesCount() {
+ return groupVertices_.size();
+ }
+ /**
+ * repeated string group_vertices = 2;
+ */
+ public java.lang.String getGroupVertices(int index) {
+ return groupVertices_.get(index);
+ }
+ /**
+ * repeated string group_vertices = 2;
+ */
+ public com.google.protobuf.ByteString
+ getGroupVerticesBytes(int index) {
+ return groupVertices_.getByteString(index);
+ }
+
+ // optional .EntityDescriptorProto merged_input_descriptor = 3;
+ public static final int MERGED_INPUT_DESCRIPTOR_FIELD_NUMBER = 3;
+ private org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto mergedInputDescriptor_;
+ /**
+ * optional .EntityDescriptorProto merged_input_descriptor = 3;
+ */
+ public boolean hasMergedInputDescriptor() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * optional .EntityDescriptorProto merged_input_descriptor = 3;
+ */
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto getMergedInputDescriptor() {
+ return mergedInputDescriptor_;
+ }
+ /**
+ * optional .EntityDescriptorProto merged_input_descriptor = 3;
+ */
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProtoOrBuilder getMergedInputDescriptorOrBuilder() {
+ return mergedInputDescriptor_;
+ }
+
+ private void initFields() {
+ groupName_ = "";
+ groupVertices_ = com.google.protobuf.LazyStringArrayList.EMPTY;
+ mergedInputDescriptor_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.getDefaultInstance();
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeBytes(1, getGroupNameBytes());
+ }
+ for (int i = 0; i < groupVertices_.size(); i++) {
+ output.writeBytes(2, groupVertices_.getByteString(i));
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeMessage(3, mergedInputDescriptor_);
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(1, getGroupNameBytes());
+ }
+ {
+ int dataSize = 0;
+ for (int i = 0; i < groupVertices_.size(); i++) {
+ dataSize += com.google.protobuf.CodedOutputStream
+ .computeBytesSizeNoTag(groupVertices_.getByteString(i));
+ }
+ size += dataSize;
+ size += 1 * getGroupVerticesList().size();
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(3, mergedInputDescriptor_);
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto other = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto) obj;
+
+ boolean result = true;
+ result = result && (hasGroupName() == other.hasGroupName());
+ if (hasGroupName()) {
+ result = result && getGroupName()
+ .equals(other.getGroupName());
+ }
+ result = result && getGroupVerticesList()
+ .equals(other.getGroupVerticesList());
+ result = result && (hasMergedInputDescriptor() == other.hasMergedInputDescriptor());
+ if (hasMergedInputDescriptor()) {
+ result = result && getMergedInputDescriptor()
+ .equals(other.getMergedInputDescriptor());
+ }
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ private int memoizedHashCode = 0;
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (hasGroupName()) {
+ hash = (37 * hash) + GROUP_NAME_FIELD_NUMBER;
+ hash = (53 * hash) + getGroupName().hashCode();
+ }
+ if (getGroupVerticesCount() > 0) {
+ hash = (37 * hash) + GROUP_VERTICES_FIELD_NUMBER;
+ hash = (53 * hash) + getGroupVerticesList().hashCode();
+ }
+ if (hasMergedInputDescriptor()) {
+ hash = (37 * hash) + MERGED_INPUT_DESCRIPTOR_FIELD_NUMBER;
+ hash = (53 * hash) + getMergedInputDescriptor().hashCode();
+ }
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code GroupInputSpecProto}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProtoOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_GroupInputSpecProto_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_GroupInputSpecProto_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto.class, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto.Builder.class);
+ }
+
+ // Construct using org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ getMergedInputDescriptorFieldBuilder();
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ groupName_ = "";
+ bitField0_ = (bitField0_ & ~0x00000001);
+ groupVertices_ = com.google.protobuf.LazyStringArrayList.EMPTY;
+ bitField0_ = (bitField0_ & ~0x00000002);
+ if (mergedInputDescriptorBuilder_ == null) {
+ mergedInputDescriptor_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.getDefaultInstance();
+ } else {
+ mergedInputDescriptorBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000004);
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_GroupInputSpecProto_descriptor;
+ }
+
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto getDefaultInstanceForType() {
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto build() {
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto buildPartial() {
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto result = new org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ result.groupName_ = groupName_;
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ groupVertices_ = new com.google.protobuf.UnmodifiableLazyStringList(
+ groupVertices_);
+ bitField0_ = (bitField0_ & ~0x00000002);
+ }
+ result.groupVertices_ = groupVertices_;
+ if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
+ to_bitField0_ |= 0x00000002;
+ }
+ if (mergedInputDescriptorBuilder_ == null) {
+ result.mergedInputDescriptor_ = mergedInputDescriptor_;
+ } else {
+ result.mergedInputDescriptor_ = mergedInputDescriptorBuilder_.build();
+ }
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto) {
+ return mergeFrom((org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto other) {
+ if (other == org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto.getDefaultInstance()) return this;
+ if (other.hasGroupName()) {
+ bitField0_ |= 0x00000001;
+ groupName_ = other.groupName_;
+ onChanged();
+ }
+ if (!other.groupVertices_.isEmpty()) {
+ if (groupVertices_.isEmpty()) {
+ groupVertices_ = other.groupVertices_;
+ bitField0_ = (bitField0_ & ~0x00000002);
+ } else {
+ ensureGroupVerticesIsMutable();
+ groupVertices_.addAll(other.groupVertices_);
+ }
+ onChanged();
+ }
+ if (other.hasMergedInputDescriptor()) {
+ mergeMergedInputDescriptor(other.getMergedInputDescriptor());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ private int bitField0_;
+
+ // optional string group_name = 1;
+ private java.lang.Object groupName_ = "";
+ /**
+ * optional string group_name = 1;
+ */
+ public boolean hasGroupName() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * optional string group_name = 1;
+ */
+ public java.lang.String getGroupName() {
+ java.lang.Object ref = groupName_;
+ if (!(ref instanceof java.lang.String)) {
+ java.lang.String s = ((com.google.protobuf.ByteString) ref)
+ .toStringUtf8();
+ groupName_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * optional string group_name = 1;
+ */
+ public com.google.protobuf.ByteString
+ getGroupNameBytes() {
+ java.lang.Object ref = groupName_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ groupName_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * optional string group_name = 1;
+ */
+ public Builder setGroupName(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ groupName_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * optional string group_name = 1;
+ */
+ public Builder clearGroupName() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ groupName_ = getDefaultInstance().getGroupName();
+ onChanged();
+ return this;
+ }
+ /**
+ * optional string group_name = 1;
+ */
+ public Builder setGroupNameBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ groupName_ = value;
+ onChanged();
+ return this;
+ }
+
+ // repeated string group_vertices = 2;
+ private com.google.protobuf.LazyStringList groupVertices_ = com.google.protobuf.LazyStringArrayList.EMPTY;
+ private void ensureGroupVerticesIsMutable() {
+ if (!((bitField0_ & 0x00000002) == 0x00000002)) {
+ groupVertices_ = new com.google.protobuf.LazyStringArrayList(groupVertices_);
+ bitField0_ |= 0x00000002;
+ }
+ }
+ /**
+ * repeated string group_vertices = 2;
+ */
+ public java.util.List<java.lang.String>
+ getGroupVerticesList() {
+ return java.util.Collections.unmodifiableList(groupVertices_);
+ }
+ /**
+ * repeated string group_vertices = 2;
+ */
+ public int getGroupVerticesCount() {
+ return groupVertices_.size();
+ }
+ /**
+ * repeated string group_vertices = 2;
+ */
+ public java.lang.String getGroupVertices(int index) {
+ return groupVertices_.get(index);
+ }
+ /**
+ * repeated string group_vertices = 2;
+ */
+ public com.google.protobuf.ByteString
+ getGroupVerticesBytes(int index) {
+ return groupVertices_.getByteString(index);
+ }
+ /**
+ * repeated string group_vertices = 2;
+ */
+ public Builder setGroupVertices(
+ int index, java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureGroupVerticesIsMutable();
+ groupVertices_.set(index, value);
+ onChanged();
+ return this;
+ }
+ /**
+ * repeated string group_vertices = 2;
+ */
+ public Builder addGroupVertices(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureGroupVerticesIsMutable();
+ groupVertices_.add(value);
+ onChanged();
+ return this;
+ }
+ /**
+ * repeated string group_vertices = 2;
+ */
+ public Builder addAllGroupVertices(
+ java.lang.Iterable<java.lang.String> values) {
+ ensureGroupVerticesIsMutable();
+ super.addAll(values, groupVertices_);
+ onChanged();
+ return this;
+ }
+ /**
+ * repeated string group_vertices = 2;
+ */
+ public Builder clearGroupVertices() {
+ groupVertices_ = com.google.protobuf.LazyStringArrayList.EMPTY;
+ bitField0_ = (bitField0_ & ~0x00000002);
+ onChanged();
+ return this;
+ }
+ /**
+ * repeated string group_vertices = 2;
+ */
+ public Builder addGroupVerticesBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureGroupVerticesIsMutable();
+ groupVertices_.add(value);
+ onChanged();
+ return this;
+ }
+
+ // optional .EntityDescriptorProto merged_input_descriptor = 3;
+ private org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto mergedInputDescriptor_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.getDefaultInstance();
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProtoOrBuilder> mergedInputDescriptorBuilder_;
+ /**
+ * optional .EntityDescriptorProto merged_input_descriptor = 3;
+ */
+ public boolean hasMergedInputDescriptor() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ /**
+ * optional .EntityDescriptorProto merged_input_descriptor = 3;
+ */
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto getMergedInputDescriptor() {
+ if (mergedInputDescriptorBuilder_ == null) {
+ return mergedInputDescriptor_;
+ } else {
+ return mergedInputDescriptorBuilder_.getMessage();
+ }
+ }
+ /**
+ * optional .EntityDescriptorProto merged_input_descriptor = 3;
+ */
+ public Builder setMergedInputDescriptor(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto value) {
+ if (mergedInputDescriptorBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ mergedInputDescriptor_ = value;
+ onChanged();
+ } else {
+ mergedInputDescriptorBuilder_.setMessage(value);
+ }
+ bitField0_ |= 0x00000004;
+ return this;
+ }
+ /**
+ * optional .EntityDescriptorProto merged_input_descriptor = 3;
+ */
+ public Builder setMergedInputDescriptor(
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.Builder builderForValue) {
+ if (mergedInputDescriptorBuilder_ == null) {
+ mergedInputDescriptor_ = builderForValue.build();
+ onChanged();
+ } else {
+ mergedInputDescriptorBuilder_.setMessage(builderForValue.build());
+ }
+ bitField0_ |= 0x00000004;
+ return this;
+ }
+ /**
+ * optional .EntityDescriptorProto merged_input_descriptor = 3;
+ */
+ public Builder mergeMergedInputDescriptor(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto value) {
+ if (mergedInputDescriptorBuilder_ == null) {
+ if (((bitField0_ & 0x00000004) == 0x00000004) &&
+ mergedInputDescriptor_ != org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.getDefaultInstance()) {
+ mergedInputDescriptor_ =
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.newBuilder(mergedInputDescriptor_).mergeFrom(value).buildPartial();
+ } else {
+ mergedInputDescriptor_ = value;
+ }
+ onChanged();
+ } else {
+ mergedInputDescriptorBuilder_.mergeFrom(value);
+ }
+ bitField0_ |= 0x00000004;
+ return this;
+ }
+ /**
+ * optional .EntityDescriptorProto merged_input_descriptor = 3;
+ */
+ public Builder clearMergedInputDescriptor() {
+ if (mergedInputDescriptorBuilder_ == null) {
+ mergedInputDescriptor_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.getDefaultInstance();
+ onChanged();
+ } else {
+ mergedInputDescriptorBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000004);
+ return this;
+ }
+ /**
+ * optional .EntityDescriptorProto merged_input_descriptor = 3;
+ */
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.Builder getMergedInputDescriptorBuilder() {
+ bitField0_ |= 0x00000004;
+ onChanged();
+ return getMergedInputDescriptorFieldBuilder().getBuilder();
+ }
+ /**
+ * optional .EntityDescriptorProto merged_input_descriptor = 3;
+ */
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProtoOrBuilder getMergedInputDescriptorOrBuilder() {
+ if (mergedInputDescriptorBuilder_ != null) {
+ return mergedInputDescriptorBuilder_.getMessageOrBuilder();
+ } else {
+ return mergedInputDescriptor_;
+ }
+ }
+ /**
+ * optional .EntityDescriptorProto merged_input_descriptor = 3;
+ */
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProtoOrBuilder>
+ getMergedInputDescriptorFieldBuilder() {
+ if (mergedInputDescriptorBuilder_ == null) {
+ mergedInputDescriptorBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProtoOrBuilder>(
+ mergedInputDescriptor_,
+ getParentForChildren(),
+ isClean());
+ mergedInputDescriptor_ = null;
+ }
+ return mergedInputDescriptorBuilder_;
+ }
+
+ // @@protoc_insertion_point(builder_scope:GroupInputSpecProto)
+ }
+
+ static {
+ defaultInstance = new GroupInputSpecProto(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:GroupInputSpecProto)
+ }
+
+ public interface FragmentSpecProtoOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // optional string task_attempt_id_string = 1;
+ /**
+ * optional string task_attempt_id_string = 1;
+ */
+ boolean hasTaskAttemptIdString();
+ /**
+ * optional string task_attempt_id_string = 1;
+ */
+ java.lang.String getTaskAttemptIdString();
+ /**
+ * optional string task_attempt_id_string = 1;
+ */
+ com.google.protobuf.ByteString
+ getTaskAttemptIdStringBytes();
+
+ // optional string dag_name = 2;
+ /**
+ * optional string dag_name = 2;
+ */
+ boolean hasDagName();
+ /**
+ * optional string dag_name = 2;
+ */
+ java.lang.String getDagName();
+ /**
+ * optional string dag_name = 2;
+ */
+ com.google.protobuf.ByteString
+ getDagNameBytes();
+
+ // optional string vertex_name = 3;
+ /**
+ * optional string vertex_name = 3;
+ */
+ boolean hasVertexName();
+ /**
+ * optional string vertex_name = 3;
+ */
+ java.lang.String getVertexName();
+ /**
+ * optional string vertex_name = 3;
+ */
+ com.google.protobuf.ByteString
+ getVertexNameBytes();
+
+ // optional .EntityDescriptorProto processor_descriptor = 4;
+ /**
+ * optional .EntityDescriptorProto processor_descriptor = 4;
+ */
+ boolean hasProcessorDescriptor();
+ /**
+ * optional .EntityDescriptorProto processor_descriptor = 4;
+ */
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto getProcessorDescriptor();
+ /**
+ * optional .EntityDescriptorProto processor_descriptor = 4;
+ */
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProtoOrBuilder getProcessorDescriptorOrBuilder();
+
+ // repeated .IOSpecProto input_specs = 5;
+ /**
+ * repeated .IOSpecProto input_specs = 5;
+ */
+ java.util.List<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto>
+ getInputSpecsList();
+ /**
+ * repeated .IOSpecProto input_specs = 5;
+ */
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto getInputSpecs(int index);
+ /**
+ * repeated .IOSpecProto input_specs = 5;
+ */
+ int getInputSpecsCount();
+ /**
+ * repeated .IOSpecProto input_specs = 5;
+ */
+ java.util.List<? extends org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProtoOrBuilder>
+ getInputSpecsOrBuilderList();
+ /**
+ * repeated .IOSpecProto input_specs = 5;
+ */
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProtoOrBuilder getInputSpecsOrBuilder(
+ int index);
+
+ // repeated .IOSpecProto output_specs = 6;
+ /**
+ * repeated .IOSpecProto output_specs = 6;
+ */
+ java.util.List<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto>
+ getOutputSpecsList();
+ /**
+ * repeated .IOSpecProto output_specs = 6;
+ */
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto getOutputSpecs(int index);
+ /**
+ * repeated .IOSpecProto output_specs = 6;
+ */
+ int getOutputSpecsCount();
+ /**
+ * repeated .IOSpecProto output_specs = 6;
+ */
+ java.util.List<? extends org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProtoOrBuilder>
+ getOutputSpecsOrBuilderList();
+ /**
+ * repeated .IOSpecProto output_specs = 6;
+ */
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProtoOrBuilder getOutputSpecsOrBuilder(
+ int index);
+
+ // repeated .GroupInputSpecProto grouped_input_specs = 7;
+ /**
+ * repeated .GroupInputSpecProto grouped_input_specs = 7;
+ */
+ java.util.List<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto>
+ getGroupedInputSpecsList();
+ /**
+ * repeated .GroupInputSpecProto grouped_input_specs = 7;
+ */
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto getGroupedInputSpecs(int index);
+ /**
+ * repeated .GroupInputSpecProto grouped_input_specs = 7;
+ */
+ int getGroupedInputSpecsCount();
+ /**
+ * repeated .GroupInputSpecProto grouped_input_specs = 7;
+ */
+ java.util.List<? extends org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProtoOrBuilder>
+ getGroupedInputSpecsOrBuilderList();
+ /**
+ * repeated .GroupInputSpecProto grouped_input_specs = 7;
+ */
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProtoOrBuilder getGroupedInputSpecsOrBuilder(
+ int index);
+
+ // optional int32 vertex_parallelism = 8;
+ /**
+ * optional int32 vertex_parallelism = 8;
+ */
+ boolean hasVertexParallelism();
+ /**
+ * optional int32 vertex_parallelism = 8;
+ */
+ int getVertexParallelism();
+
+ // optional int32 fragment_number = 9;
+ /**
+ * optional int32 fragment_number = 9;
+ */
+ boolean hasFragmentNumber();
+ /**
+ * optional int32 fragment_number = 9;
+ */
+ int getFragmentNumber();
+
+ // optional int32 attempt_number = 10;
+ /**
+ * optional int32 attempt_number = 10;
+ */
+ boolean hasAttemptNumber();
+ /**
+ * optional int32 attempt_number = 10;
+ */
+ int getAttemptNumber();
+ }
+ /**
+ * Protobuf type {@code FragmentSpecProto}
+ */
+ public static final class FragmentSpecProto extends
+ com.google.protobuf.GeneratedMessage
+ implements FragmentSpecProtoOrBuilder {
+ // Use FragmentSpecProto.newBuilder() to construct.
+ private FragmentSpecProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private FragmentSpecProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final FragmentSpecProto defaultInstance;
+ public static FragmentSpecProto getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public FragmentSpecProto getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private FragmentSpecProto(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 10: {
+ bitField0_ |= 0x00000001;
+ taskAttemptIdString_ = input.readBytes();
+ break;
+ }
+ case 18: {
+ bitField0_ |= 0x00000002;
+ dagName_ = input.readBytes();
+ break;
+ }
+ case 26: {
+ bitField0_ |= 0x00000004;
+ vertexName_ = input.readBytes();
+ break;
+ }
+ case 34: {
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.Builder subBuilder = null;
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
+ subBuilder = processorDescriptor_.toBuilder();
+ }
+ processorDescriptor_ = input.readMessage(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.PARSER, extensionRegistry);
+ if (subBuilder != null) {
+ subBuilder.mergeFrom(processorDescriptor_);
+ processorDescriptor_ = subBuilder.buildPartial();
+ }
+ bitField0_ |= 0x00000008;
+ break;
+ }
+ case 42: {
+ if (!((mutable_bitField0_ & 0x00000010) == 0x00000010)) {
+ inputSpecs_ = new java.util.ArrayList<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto>();
+ mutable_bitField0_ |= 0x00000010;
+ }
+ inputSpecs_.add(input.readMessage(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto.PARSER, extensionRegistry));
+ break;
+ }
+ case 50: {
+ if (!((mutable_bitField0_ & 0x00000020) == 0x00000020)) {
+ outputSpecs_ = new java.util.ArrayList<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto>();
+ mutable_bitField0_ |= 0x00000020;
+ }
+ outputSpecs_.add(input.readMessage(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto.PARSER, extensionRegistry));
+ break;
+ }
+ case 58: {
+ if (!((mutable_bitField0_ & 0x00000040) == 0x00000040)) {
+ groupedInputSpecs_ = new java.util.ArrayList<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto>();
+ mutable_bitField0_ |= 0x00000040;
+ }
+ groupedInputSpecs_.add(input.readMessage(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto.PARSER, extensionRegistry));
+ break;
+ }
+ case 64: {
+ bitField0_ |= 0x00000010;
+ vertexParallelism_ = input.readInt32();
+ break;
+ }
+ case 72: {
+ bitField0_ |= 0x00000020;
+ fragmentNumber_ = input.readInt32();
+ break;
+ }
+ case 80: {
+ bitField0_ |= 0x00000040;
+ attemptNumber_ = input.readInt32();
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ if (((mutable_bitField0_ & 0x00000010) == 0x00000010)) {
+ inputSpecs_ = java.util.Collections.unmodifiableList(inputSpecs_);
+ }
+ if (((mutable_bitField0_ & 0x00000020) == 0x00000020)) {
+ outputSpecs_ = java.util.Collections.unmodifiableList(outputSpecs_);
+ }
+ if (((mutable_bitField0_ & 0x00000040) == 0x00000040)) {
+ groupedInputSpecs_ = java.util.Collections.unmodifiableList(groupedInputSpecs_);
+ }
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_FragmentSpecProto_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_FragmentSpecProto_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentSpecProto.class, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentSpecProto.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser<FragmentSpecProto> PARSER =
+ new com.google.protobuf.AbstractParser<FragmentSpecProto>() {
+ public FragmentSpecProto parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new FragmentSpecProto(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser<FragmentSpecProto> getParserForType() {
+ return PARSER;
+ }
+
+ private int bitField0_;
+ // optional string task_attempt_id_string = 1;
+ public static final int TASK_ATTEMPT_ID_STRING_FIELD_NUMBER = 1;
+ private java.lang.Object taskAttemptIdString_;
+ /**
+ * optional string task_attempt_id_string = 1;
+ */
+ public boolean hasTaskAttemptIdString() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * optional string task_attempt_id_string = 1;
+ */
+ public java.lang.String getTaskAttemptIdString() {
+ java.lang.Object ref = taskAttemptIdString_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
+ taskAttemptIdString_ = s;
+ }
+ return s;
+ }
+ }
+ /**
+ * optional string task_attempt_id_string = 1;
+ */
+ public com.google.protobuf.ByteString
+ getTaskAttemptIdStringBytes() {
+ java.lang.Object ref = taskAttemptIdString_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ taskAttemptIdString_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ // optional string dag_name = 2;
+ public static final int DAG_NAME_FIELD_NUMBER = 2;
+ private java.lang.Object dagName_;
+ /**
+ * optional string dag_name = 2;
+ */
+ public boolean hasDagName() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * optional string dag_name = 2;
+ */
+ public java.lang.String getDagName() {
+ java.lang.Object ref = dagName_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
+ dagName_ = s;
+ }
+ return s;
+ }
+ }
+ /**
+ * optional string dag_name = 2;
+ */
+ public com.google.protobuf.ByteString
+ getDagNameBytes() {
+ java.lang.Object ref = dagName_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ dagName_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ // optional string vertex_name = 3;
+ public static final int VERTEX_NAME_FIELD_NUMBER = 3;
+ private java.lang.Object vertexName_;
+ /**
+ * optional string vertex_name = 3;
+ */
+ public boolean hasVertexName() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ /**
+ * optional string vertex_name = 3;
+ */
+ public java.lang.String getVertexName() {
+ java.lang.Object ref = vertexName_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
+ vertexName_ = s;
+ }
+ return s;
+ }
+ }
+ /**
+ * optional string vertex_name = 3;
+ */
+ public com.google.protobuf.ByteString
+ getVertexNameBytes() {
+ java.lang.Object ref = vertexName_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ vertexName_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ // optional .EntityDescriptorProto processor_descriptor = 4;
+ public static final int PROCESSOR_DESCRIPTOR_FIELD_NUMBER = 4;
+ private org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto processorDescriptor_;
+ /**
+ * optional .EntityDescriptorProto processor_descriptor = 4;
+ */
+ public boolean hasProcessorDescriptor() {
+ return ((bitField0_ & 0x00000008) == 0x00000008);
+ }
+ /**
+ * optional .EntityDescriptorProto processor_descriptor = 4;
+ */
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto getProcessorDescriptor() {
+ return processorDescriptor_;
+ }
+ /**
+ * optional .EntityDescriptorProto processor_descriptor = 4;
+ */
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProtoOrBuilder getProcessorDescriptorOrBuilder() {
+ return processorDescriptor_;
+ }
+
+ // repeated .IOSpecProto input_specs = 5;
+ public static final int INPUT_SPECS_FIELD_NUMBER = 5;
+ private java.util.List<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto> inputSpecs_;
+ /**
+ * repeated .IOSpecProto input_specs = 5;
+ */
+ public java.util.List<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto> getInputSpecsList() {
+ return inputSpecs_;
+ }
+ /**
+ * repeated .IOSpecProto input_specs = 5;
+ */
+ public java.util.List<? extends org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProtoOrBuilder>
+ getInputSpecsOrBuilderList() {
+ return inputSpecs_;
+ }
+ /**
+ * repeated .IOSpecProto input_specs = 5;
+ */
+ public int getInputSpecsCount() {
+ return inputSpecs_.size();
+ }
+ /**
+ * repeated .IOSpecProto input_specs = 5;
+ */
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto getInputSpecs(int index) {
+ return inputSpecs_.get(index);
+ }
+ /**
+ * repeated .IOSpecProto input_specs = 5;
+ */
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProtoOrBuilder getInputSpecsOrBuilder(
+ int index) {
+ return inputSpecs_.get(index);
+ }
+
+ // repeated .IOSpecProto output_specs = 6;
+ public static final int OUTPUT_SPECS_FIELD_NUMBER = 6;
+ private java.util.List<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto> outputSpecs_;
+ /**
+ * repeated .IOSpecProto output_specs = 6;
+ */
+ public java.util.List<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto> getOutputSpecsList() {
+ return outputSpecs_;
+ }
+ /**
+ * repeated .IOSpecProto output_specs = 6;
+ */
+ public java.util.List<? extends org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProtoOrBuilder>
+ getOutputSpecsOrBuilderList() {
+ return outputSpecs_;
+ }
+ /**
+ * repeated .IOSpecProto output_specs = 6;
+ */
+ public int getOutputSpecsCount() {
+ return outputSpecs_.size();
+ }
+ /**
+ * repeated .IOSpecProto output_specs = 6;
+ */
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto getOutputSpecs(int index) {
+ return outputSpecs_.get(index);
+ }
+ /**
+ * repeated .IOSpecProto output_specs = 6;
+ */
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProtoOrBuilder getOutputSpecsOrBuilder(
+ int index) {
+ return outputSpecs_.get(index);
+ }
+
+ // repeated .GroupInputSpecProto grouped_input_specs = 7;
+ public static final int GROUPED_INPUT_SPECS_FIELD_NUMBER = 7;
+ private java.util.List<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto> groupedInputSpecs_;
+ /**
+ * repeated .GroupInputSpecProto grouped_input_specs = 7;
+ */
+ public java.util.List<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto> getGroupedInputSpecsList() {
+ return groupedInputSpecs_;
+ }
+ /**
+ * repeated .GroupInputSpecProto grouped_input_specs = 7;
+ */
+ public java.util.List<? extends org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProtoOrBuilder>
+ getGroupedInputSpecsOrBuilderList() {
+ return groupedInputSpecs_;
+ }
+ /**
+ * repeated .GroupInputSpecProto grouped_input_specs = 7;
+ */
+ public int getGroupedInputSpecsCount() {
+ return groupedInputSpecs_.size();
+ }
+ /**
+ * repeated .GroupInputSpecProto grouped_input_specs = 7;
+ */
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto getGroupedInputSpecs(int index) {
+ return groupedInputSpecs_.get(index);
+ }
+ /**
+ * repeated .GroupInputSpecProto grouped_input_specs = 7;
+ */
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProtoOrBuilder getGroupedInputSpecsOrBuilder(
+ int index) {
+ return groupedInputSpecs_.get(index);
+ }
+
+ // optional int32 vertex_parallelism = 8;
+ public static final int VERTEX_PARALLELISM_FIELD_NUMBER = 8;
+ private int vertexParallelism_;
+ /**
+ * optional int32 vertex_parallelism = 8;
+ */
+ public boolean hasVertexParallelism() {
+ return ((bitField0_ & 0x00000010) == 0x00000010);
+ }
+ /**
+ * optional int32 vertex_parallelism = 8;
+ */
+ public int getVertexParallelism() {
+ return vertexParallelism_;
+ }
+
+ // optional int32 fragment_number = 9;
+ public static final int FRAGMENT_NUMBER_FIELD_NUMBER = 9;
+ private int fragmentNumber_;
+ /**
+ * optional int32 fragment_number = 9;
+ */
+ public boolean hasFragmentNumber() {
+ return ((bitField0_ & 0x00000020) == 0x00000020);
+ }
+ /**
+ * optional int32 fragment_number = 9;
+ */
+ public int getFragmentNumber() {
+ return fragmentNumber_;
+ }
+
+ // optional int32 attempt_number = 10;
+ public static final int ATTEMPT_NUMBER_FIELD_NUMBER = 10;
+ private int attemptNumber_;
+ /**
+ * optional int32 attempt_number = 10;
+ */
+ public boolean hasAttemptNumber() {
+ return ((bitField0_ & 0x00000040) == 0x00000040);
+ }
+ /**
+ * optional int32 attempt_number = 10;
+ */
+ public int getAttemptNumber() {
+ return attemptNumber_;
+ }
+
+ private void initFields() {
+ taskAttemptIdString_ = "";
+ dagName_ = "";
+ vertexName_ = "";
+ processorDescriptor_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.getDefaultInstance();
+ inputSpecs_ = java.util.Collections.emptyList();
+ outputSpecs_ = java.util.Collections.emptyList();
+ groupedInputSpecs_ = java.util.Collections.emptyList();
+ vertexParallelism_ = 0;
+ fragmentNumber_ = 0;
+ attemptNumber_ = 0;
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
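+ // writeTo() emits each present optional field and every element of the
+ // repeated spec lists under its declared field number (1-10).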
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeBytes(1, getTaskAttemptIdStringBytes());
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeBytes(2, getDagNameBytes());
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ output.writeBytes(3, getVertexNameBytes());
+ }
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
+ output.writeMessage(4, processorDescriptor_);
+ }
+ for (int i = 0; i < inputSpecs_.size(); i++) {
+ output.writeMessage(5, inputSpecs_.get(i));
+ }
+ for (int i = 0; i < outputSpecs_.size(); i++) {
+ output.writeMessage(6, outputSpecs_.get(i));
+ }
+ for (int i = 0; i < groupedInputSpecs_.size(); i++) {
+ output.writeMessage(7, groupedInputSpecs_.get(i));
+ }
+ if (((bitField0_ & 0x00000010) == 0x00000010)) {
+ output.writeInt32(8, vertexParallelism_);
+ }
+ if (((bitField0_ & 0x00000020) == 0x00000020)) {
+ output.writeInt32(9, fragmentNumber_);
+ }
+ if (((bitField0_ & 0x00000040) == 0x00000040)) {
+ output.writeInt32(10, attemptNumber_);
+ }
+ getUnknownFields().writeTo(output);
+ }
+
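+ // The serialized size is computed field by field, mirroring writeTo(),
+ // and memoized; -1 means it has not been computed yet.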
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(1, getTaskAttemptIdStringBytes());
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(2, getDagNameBytes());
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(3, getVertexNameBytes());
+ }
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(4, processorDescriptor_);
+ }
+ for (int i = 0; i < inputSpecs_.size(); i++) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(5, inputSpecs_.get(i));
+ }
+ for (int i = 0; i < outputSpecs_.size(); i++) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(6, outputSpecs_.get(i));
+ }
+ for (int i = 0; i < groupedInputSpecs_.size(); i++) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(7, groupedInputSpecs_.get(i));
+ }
+ if (((bitField0_ & 0x00000010) == 0x00000010)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeInt32Size(8, vertexParallelism_);
+ }
+ if (((bitField0_ & 0x00000020) == 0x00000020)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeInt32Size(9, fragmentNumber_);
+ }
+ if (((bitField0_ & 0x00000040) == 0x00000040)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeInt32Size(10, attemptNumber_);
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentSpecProto)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentSpecProto other = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentSpecProto) obj;
+
+ boolean result = true;
+ result = result && (hasTaskAttemptIdString() == other.hasTaskAttemptIdString());
+ if (hasTaskAttemptIdString()) {
+ result = result && getTaskAttemptIdString()
+ .equals(other.getTaskAttemptIdString());
+ }
+ result = result && (hasDagName() == other.hasDagName());
+ if (hasDagName()) {
+ result = result && getDagName()
+ .equals(other.getDagName());
+ }
+ result = result && (hasVertexName() == other.hasVertexName());
+ if (hasVertexName()) {
+ result = result && getVertexName()
+ .equals(other.getVertexName());
+ }
+ result = result && (hasProcessorDescriptor() == other.hasProcessorDescriptor());
+ if (hasProcessorDescriptor()) {
+ result = result && getProcessorDescriptor()
+ .equals(other.getProcessorDescriptor());
+ }
+ result = result && getInputSpecsList()
+ .equals(other.getInputSpecsList());
+ result = result && getOutputSpecsList()
+ .equals(other.getOutputSpecsList());
+ result = result && getGroupedInputSpecsList()
+ .equals(other.getGroupedInputSpecsList());
+ result = result && (hasVertexParallelism() == other.hasVertexParallelism());
+ if (hasVertexParallelism()) {
+ result = result && (getVertexParallelism()
+ == other.getVertexParallelism());
+ }
+ result = result && (hasFragmentNumber() == other.hasFragmentNumber());
+ if (hasFragmentNumber()) {
+ result = result && (getFragmentNumber()
+ == other.getFragmentNumber());
+ }
+ result = result && (hasAttemptNumber() == other.hasAttemptNumber());
+ if (hasAttemptNumber()) {
+ result = result && (getAttemptNumber()
+ == other.getAttemptNumber());
+ }
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
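+ // hashCode() folds in the field number and value of every present field
+ // using the 37/53 prime multipliers and caches the result, matching equals().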
+ private int memoizedHashCode = 0;
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (hasTaskAttemptIdString()) {
+ hash = (37 * hash) + TASK_ATTEMPT_ID_STRING_FIELD_NUMBER;
+ hash = (53 * hash) + getTaskAttemptIdString().hashCode();
+ }
+ if (hasDagName()) {
+ hash = (37 * hash) + DAG_NAME_FIELD_NUMBER;
+ hash = (53 * hash) + getDagName().hashCode();
+ }
+ if (hasVertexName()) {
+ hash = (37 * hash) + VERTEX_NAME_FIELD_NUMBER;
+ hash = (53 * hash) + getVertexName().hashCode();
+ }
+ if (hasProcessorDescriptor()) {
+ hash = (37 * hash) + PROCESSOR_DESCRIPTOR_FIELD_NUMBER;
+ hash = (53 * hash) + getProcessorDescriptor().hashCode();
+ }
+ if (getInputSpecsCount() > 0) {
+ hash = (37 * hash) + INPUT_SPECS_FIELD_NUMBER;
+ hash = (53 * hash) + getInputSpecsList().hashCode();
+ }
+ if (getOutputSpecsCount() > 0) {
+ hash = (37 * hash) + OUTPUT_SPECS_FIELD_NUMBER;
+ hash = (53 * hash) + getOutputSpecsList().hashCode();
+ }
+ if (getGroupedInputSpecsCount() > 0) {
+ hash = (37 * hash) + GROUPED_INPUT_SPECS_FIELD_NUMBER;
+ hash = (53 * hash) + getGroupedInputSpecsList().hashCode();
+ }
+ if (hasVertexParallelism()) {
+ hash = (37 * hash) + VERTEX_PARALLELISM_FIELD_NUMBER;
+ hash = (53 * hash) + getVertexParallelism();
+ }
+ if (hasFragmentNumber()) {
+ hash = (37 * hash) + FRAGMENT_NUMBER_FIELD_NUMBER;
+ hash = (53 * hash) + getFragmentNumber();
+ }
+ if (hasAttemptNumber()) {
+ hash = (37 * hash) + ATTEMPT_NUMBER_FIELD_NUMBER;
+ hash = (53 * hash) + getAttemptNumber();
+ }
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentSpecProto parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentSpecProto parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentSpecProto parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentSpecProto parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentSpecProto parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentSpecProto parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentSpecProto parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentSpecProto parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentSpecProto parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentSpecProto parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentSpecProto prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code FragmentSpecProto}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentSpecProtoOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_FragmentSpecProto_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_FragmentSpecProto_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentSpecProto.class, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentSpecProto.Builder.class);
+ }
+
+ // Construct using org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentSpecProto.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ getProcessorDescriptorFieldBuilder();
+ getInputSpecsFieldBuilder();
+ getOutputSpecsFieldBuilder();
+ getGroupedInputSpecsFieldBuilder();
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ taskAttemptIdString_ = "";
+ bitField0_ = (bitField0_ & ~0x00000001);
+ dagName_ = "";
+ bitField0_ = (bitField0_ & ~0x00000002);
+ vertexName_ = "";
+ bitField0_ = (bitField0_ & ~0x00000004);
+ if (processorDescriptorBuilder_ == null) {
+ processorDescriptor_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.getDefaultInstance();
+ } else {
+ processorDescriptorBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000008);
+ if (inputSpecsBuilder_ == null) {
+ inputSpecs_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000010);
+ } else {
+ inputSpecsBuilder_.clear();
+ }
+ if (outputSpecsBuilder_ == null) {
+ outputSpecs_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000020);
+ } else {
+ outputSpecsBuilder_.clear();
+ }
+ if (groupedInputSpecsBuilder_ == null) {
+ groupedInputSpecs_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000040);
+ } else {
+ groupedInputSpecsBuilder_.clear();
+ }
+ vertexParallelism_ = 0;
+ bitField0_ = (bitField0_ & ~0x00000080);
+ fragmentNumber_ = 0;
+ bitField0_ = (bitField0_ & ~0x00000100);
+ attemptNumber_ = 0;
+ bitField0_ = (bitField0_ & ~0x00000200);
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_FragmentSpecProto_descriptor;
+ }
+
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentSpecProto getDefaultInstanceForType() {
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentSpecProto.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentSpecProto build() {
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentSpecProto result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
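+ // buildPartial() copies the builder state into a new message and remaps the
+ // builder presence bits for fields 8-10 (0x80/0x100/0x200) onto the message's
+ // more compact layout (0x10/0x20/0x40); the repeated fields need no bits there.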
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentSpecProto buildPartial() {
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentSpecProto result = new org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentSpecProto(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ result.taskAttemptIdString_ = taskAttemptIdString_;
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+ to_bitField0_ |= 0x00000002;
+ }
+ result.dagName_ = dagName_;
+ if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
+ to_bitField0_ |= 0x00000004;
+ }
+ result.vertexName_ = vertexName_;
+ if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
+ to_bitField0_ |= 0x00000008;
+ }
+ if (processorDescriptorBuilder_ == null) {
+ result.processorDescriptor_ = processorDescriptor_;
+ } else {
+ result.processorDescriptor_ = processorDescriptorBuilder_.build();
+ }
+ if (inputSpecsBuilder_ == null) {
+ if (((bitField0_ & 0x00000010) == 0x00000010)) {
+ inputSpecs_ = java.util.Collections.unmodifiableList(inputSpecs_);
+ bitField0_ = (bitField0_ & ~0x00000010);
+ }
+ result.inputSpecs_ = inputSpecs_;
+ } else {
+ result.inputSpecs_ = inputSpecsBuilder_.build();
+ }
+ if (outputSpecsBuilder_ == null) {
+ if (((bitField0_ & 0x00000020) == 0x00000020)) {
+ outputSpecs_ = java.util.Collections.unmodifiableList(outputSpecs_);
+ bitField0_ = (bitField0_ & ~0x00000020);
+ }
+ result.outputSpecs_ = outputSpecs_;
+ } else {
+ result.outputSpecs_ = outputSpecsBuilder_.build();
+ }
+ if (groupedInputSpecsBuilder_ == null) {
+ if (((bitField0_ & 0x00000040) == 0x00000040)) {
+ groupedInputSpecs_ = java.util.Collections.unmodifiableList(groupedInputSpecs_);
+ bitField0_ = (bitField0_ & ~0x00000040);
+ }
+ result.groupedInputSpecs_ = groupedInputSpecs_;
+ } else {
+ result.groupedInputSpecs_ = groupedInputSpecsBuilder_.build();
+ }
+ if (((from_bitField0_ & 0x00000080) == 0x00000080)) {
+ to_bitField0_ |= 0x00000010;
+ }
+ result.vertexParallelism_ = vertexParallelism_;
+ if (((from_bitField0_ & 0x00000100) == 0x00000100)) {
+ to_bitField0_ |= 0x00000020;
+ }
+ result.fragmentNumber_ = fragmentNumber_;
+ if (((from_bitField0_ & 0x00000200) == 0x00000200)) {
+ to_bitField0_ |= 0x00000040;
+ }
+ result.attemptNumber_ = attemptNumber_;
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentSpecProto) {
+ return mergeFrom((org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentSpecProto)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
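+ // Merging another FragmentSpecProto overwrites the string fields that are set
+ // on 'other', recursively merges processor_descriptor, and appends its
+ // repeated spec lists to this builder.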
+ public Builder mergeFrom(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentSpecProto other) {
+ if (other == org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentSpecProto.getDefaultInstance()) return this;
+ if (other.hasTaskAttemptIdString()) {
+ bitField0_ |= 0x00000001;
+ taskAttemptIdString_ = other.taskAttemptIdString_;
+ onChanged();
+ }
+ if (other.hasDagName()) {
+ bitField0_ |= 0x00000002;
+ dagName_ = other.dagName_;
+ onChanged();
+ }
+ if (other.hasVertexName()) {
+ bitField0_ |= 0x00000004;
+ vertexName_ = other.vertexName_;
+ onChanged();
+ }
+ if (other.hasProcessorDescriptor()) {
+ mergeProcessorDescriptor(other.getProcessorDescriptor());
+ }
+ if (inputSpecsBuilder_ == null) {
+ if (!other.inputSpecs_.isEmpty()) {
+ if (inputSpecs_.isEmpty()) {
+ inputSpecs_ = other.inputSpecs_;
+ bitField0_ = (bitField0_ & ~0x00000010);
+ } else {
+ ensureInputSpecsIsMutable();
+ inputSpecs_.addAll(other.inputSpecs_);
+ }
+ onChanged();
+ }
+ } else {
+ if (!other.inputSpecs_.isEmpty()) {
+ if (inputSpecsBuilder_.isEmpty()) {
+ inputSpecsBuilder_.dispose();
+ inputSpecsBuilder_ = null;
+ inputSpecs_ = other.inputSpecs_;
+ bitField0_ = (bitField0_ & ~0x00000010);
+ inputSpecsBuilder_ =
+ com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
+ getInputSpecsFieldBuilder() : null;
+ } else {
+ inputSpecsBuilder_.addAllMessages(other.inputSpecs_);
+ }
+ }
+ }
+ if (outputSpecsBuilder_ == null) {
+ if (!other.outputSpecs_.isEmpty()) {
+ if (outputSpecs_.isEmpty()) {
+ outputSpecs_ = other.outputSpecs_;
+ bitField0_ = (bitField0_ & ~0x00000020);
+ } else {
+ ensureOutputSpecsIsMutable();
+ outputSpecs_.addAll(other.outputSpecs_);
+ }
+ onChanged();
+ }
+ } else {
+ if (!other.outputSpecs_.isEmpty()) {
+ if (outputSpecsBuilder_.isEmpty()) {
+ outputSpecsBuilder_.dispose();
+ outputSpecsBuilder_ = null;
+ outputSpecs_ = other.outputSpecs_;
+ bitField0_ = (bitField0_ & ~0x00000020);
+ outputSpecsBuilder_ =
+ com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
+ getOutputSpecsFieldBuilder() : null;
+ } else {
+ outputSpecsBuilder_.addAllMessages(other.outputSpecs_);
+ }
+ }
+ }
+ if (groupedInputSpecsBuilder_ == null) {
+ if (!other.groupedInputSpecs_.isEmpty()) {
+ if (groupedInputSpecs_.isEmpty()) {
+ groupedInputSpecs_ = other.groupedInputSpecs_;
+ bitField0_ = (bitField0_ & ~0x00000040);
+ } else {
+ ensureGroupedInputSpecsIsMutable();
+ groupedInputSpecs_.addAll(other.groupedInputSpecs_);
+ }
+ onChanged();
+ }
+ } else {
+ if (!other.groupedInputSpecs_.isEmpty()) {
+ if (groupedInputSpecsBuilder_.isEmpty()) {
+ groupedInputSpecsBuilder_.dispose();
+ groupedInputSpecsBuilder_ = null;
+ groupedInputSpecs_ = other.groupedInputSpecs_;
+ bitField0_ = (bitField0_ & ~0x00000040);
+ groupedInputSpecsBuilder_ =
+ com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
+ getGroupedInputSpecsFieldBuilder() : null;
+ } else {
+ groupedInputSpecsBuilder_.addAllMessages(other.groupedInputSpecs_);
+ }
+ }
+ }
+ if (other.hasVertexParallelism()) {
+ setVertexParallelism(other.getVertexParallelism());
+ }
+ if (other.hasFragmentNumber()) {
+ setFragmentNumber(other.getFragmentNumber());
+ }
+ if (other.hasAttemptNumber()) {
+ setAttemptNumber(other.getAttemptNumber());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentSpecProto parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentSpecProto) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ private int bitField0_;
+
+ // optional string task_attempt_id_string = 1;
+ private java.lang.Object taskAttemptIdString_ = "";
+ /**
+ * optional string task_attempt_id_string = 1;
+ */
+ public boolean hasTaskAttemptIdString() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * optional string task_attempt_id_string = 1;
+ */
+ public java.lang.String getTaskAttemptIdString() {
+ java.lang.Object ref = taskAttemptIdString_;
+ if (!(ref instanceof java.lang.String)) {
+ java.lang.String s = ((com.google.protobuf.ByteString) ref)
+ .toStringUtf8();
+ taskAttemptIdString_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * optional string task_attempt_id_string = 1;
+ */
+ public com.google.protobuf.ByteString
+ getTaskAttemptIdStringBytes() {
+ java.lang.Object ref = taskAttemptIdString_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ taskAttemptIdString_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * optional string task_attempt_id_string = 1;
+ */
+ public Builder setTaskAttemptIdString(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ taskAttemptIdString_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * optional string task_attempt_id_string = 1;
+ */
+ public Builder clearTaskAttemptIdString() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ taskAttemptIdString_ = getDefaultInstance().getTaskAttemptIdString();
+ onChanged();
+ return this;
+ }
+ /**
+ * optional string task_attempt_id_string = 1;
+ */
+ public Builder setTaskAttemptIdStringBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ taskAttemptIdString_ = value;
+ onChanged();
+ return this;
+ }
+
+ // optional string dag_name = 2;
+ private java.lang.Object dagName_ = "";
+ /**
+ * optional string dag_name = 2;
+ */
+ public boolean hasDagName() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * optional string dag_name = 2;
+ */
+ public java.lang.String getDagName() {
+ java.lang.Object ref = dagName_;
+ if (!(ref instanceof java.lang.String)) {
+ java.lang.String s = ((com.google.protobuf.ByteString) ref)
+ .toStringUtf8();
+ dagName_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * optional string dag_name = 2;
+ */
+ public com.google.protobuf.ByteString
+ getDagNameBytes() {
+ java.lang.Object ref = dagName_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ dagName_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * optional string dag_name = 2;
+ */
+ public Builder setDagName(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000002;
+ dagName_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * optional string dag_name = 2;
+ */
+ public Builder clearDagName() {
+ bitField0_ = (bitField0_ & ~0x00000002);
+ dagName_ = getDefaultInstance().getDagName();
+ onChanged();
+ return this;
+ }
+ /**
+ * optional string dag_name = 2;
+ */
+ public Builder setDagNameBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000002;
+ dagName_ = value;
+ onChanged();
+ return this;
+ }
+
+ // optional string vertex_name = 3;
+ private java.lang.Object vertexName_ = "";
+ /**
+ * optional string vertex_name = 3;
+ */
+ public boolean hasVertexName() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ /**
+ * optional string vertex_name = 3;
+ */
+ public java.lang.String getVertexName() {
+ java.lang.Object ref = vertexName_;
+ if (!(ref instanceof java.lang.String)) {
+ java.lang.String s = ((com.google.protobuf.ByteString) ref)
+ .toStringUtf8();
+ vertexName_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * optional string vertex_name = 3;
+ */
+ public com.google.protobuf.ByteString
+ getVertexNameBytes() {
+ java.lang.Object ref = vertexName_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ vertexName_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * optional string vertex_name = 3;
+ */
+ public Builder setVertexName(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000004;
+ vertexName_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * optional string vertex_name = 3;
+ */
+ public Builder clearVertexName() {
+ bitField0_ = (bitField0_ & ~0x00000004);
+ vertexName_ = getDefaultInstance().getVertexName();
+ onChanged();
+ return this;
+ }
+ /**
+ * optional string vertex_name = 3;
+ */
+ public Builder setVertexNameBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000004;
+ vertexName_ = value;
+ onChanged();
+ return this;
+ }
+
+ // optional .EntityDescriptorProto processor_descriptor = 4;
+ private org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto processorDescriptor_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.getDefaultInstance();
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProtoOrBuilder> processorDescriptorBuilder_;
+ /**
+ * optional .EntityDescriptorProto processor_descriptor = 4;
+ */
+ public boolean hasProcessorDescriptor() {
+ return ((bitField0_ & 0x00000008) == 0x00000008);
+ }
+ /**
+ * optional .EntityDescriptorProto processor_descriptor = 4;
+ */
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto getProcessorDescriptor() {
+ if (processorDescriptorBuilder_ == null) {
+ return processorDescriptor_;
+ } else {
+ return processorDescriptorBuilder_.getMessage();
+ }
+ }
+ /**
+ * optional .EntityDescriptorProto processor_descriptor = 4;
+ */
+ public Builder setProcessorDescriptor(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto value) {
+ if (processorDescriptorBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ processorDescriptor_ = value;
+ onChanged();
+ } else {
+ processorDescriptorBuilder_.setMessage(value);
+ }
+ bitField0_ |= 0x00000008;
+ return this;
+ }
+ /**
+ * optional .EntityDescriptorProto processor_descriptor = 4;
+ */
+ public Builder setProcessorDescriptor(
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.Builder builderForValue) {
+ if (processorDescriptorBuilder_ == null) {
+ processorDescriptor_ = builderForValue.build();
+ onChanged();
+ } else {
+ processorDescriptorBuilder_.setMessage(builderForValue.build());
+ }
+ bitField0_ |= 0x00000008;
+ return this;
+ }
+ /**
+ * optional .EntityDescriptorProto processor_descriptor = 4;
+ */
+ public Builder mergeProcessorDescriptor(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto value) {
+ if (processorDescriptorBuilder_ == null) {
+ if (((bitField0_ & 0x00000008) == 0x00000008) &&
+ processorDescriptor_ != org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.getDefaultInstance()) {
+ processorDescriptor_ =
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.newBuilder(processorDescriptor_).mergeFrom(value).buildPartial();
+ } else {
+ processorDescriptor_ = value;
+ }
+ onChanged();
+ } else {
+ processorDescriptorBuilder_.mergeFrom(value);
+ }
+ bitField0_ |= 0x00000008;
+ return this;
+ }
+ /**
+ * optional .EntityDescriptorProto processor_descriptor = 4;
+ */
+ public Builder clearProcessorDescriptor() {
+ if (processorDescriptorBuilder_ == null) {
+ processorDescriptor_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.getDefaultInstance();
+ onChanged();
+ } else {
+ processorDescriptorBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000008);
+ return this;
+ }
+ /**
+ * optional .EntityDescriptorProto processor_descriptor = 4;
+ */
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.Builder getProcessorDescriptorBuilder() {
+ bitField0_ |= 0x00000008;
+ onChanged();
+ return getProcessorDescriptorFieldBuilder().getBuilder();
+ }
+ /**
+ * optional .EntityDescriptorProto processor_descriptor = 4;
+ */
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProtoOrBuilder getProcessorDescriptorOrBuilder() {
+ if (processorDescriptorBuilder_ != null) {
+ return processorDescriptorBuilder_.getMessageOrBuilder();
+ } else {
+ return processorDescriptor_;
+ }
+ }
+ /**
+ * optional .EntityDescriptorProto processor_descriptor = 4;
+ */
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProtoOrBuilder>
+ getProcessorDescriptorFieldBuilder() {
+ if (processorDescriptorBuilder_ == null) {
+ processorDescriptorBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProtoOrBuilder>(
+ processorDescriptor_,
+ getParentForChildren(),
+ isClean());
+ processorDescriptor_ = null;
+ }
+ return processorDescriptorBuilder_;
+ }
+
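+ // Each repeated field below is kept in a plain List until a nested builder is
+ // first requested; ownership then moves to a RepeatedFieldBuilder and the list
+ // reference is cleared.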
+ // repeated .IOSpecProto input_specs = 5;
+ private java.util.List<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto> inputSpecs_ =
+ java.util.Collections.emptyList();
+ private void ensureInputSpecsIsMutable() {
+ if (!((bitField0_ & 0x00000010) == 0x00000010)) {
+ inputSpecs_ = new java.util.ArrayList<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto>(inputSpecs_);
+ bitField0_ |= 0x00000010;
+ }
+ }
+
+ private com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProtoOrBuilder> inputSpecsBuilder_;
+
+ /**
+ * repeated .IOSpecProto input_specs = 5;
+ */
+ public java.util.List<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto> getInputSpecsList() {
+ if (inputSpecsBuilder_ == null) {
+ return java.util.Collections.unmodifiableList(inputSpecs_);
+ } else {
+ return inputSpecsBuilder_.getMessageList();
+ }
+ }
+ /**
+ * repeated .IOSpecProto input_specs = 5;
+ */
+ public int getInputSpecsCount() {
+ if (inputSpecsBuilder_ == null) {
+ return inputSpecs_.size();
+ } else {
+ return inputSpecsBuilder_.getCount();
+ }
+ }
+ /**
+ * repeated .IOSpecProto input_specs = 5;
+ */
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto getInputSpecs(int index) {
+ if (inputSpecsBuilder_ == null) {
+ return inputSpecs_.get(index);
+ } else {
+ return inputSpecsBuilder_.getMessage(index);
+ }
+ }
+ /**
+ * repeated .IOSpecProto input_specs = 5;
+ */
+ public Builder setInputSpecs(
+ int index, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto value) {
+ if (inputSpecsBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureInputSpecsIsMutable();
+ inputSpecs_.set(index, value);
+ onChanged();
+ } else {
+ inputSpecsBuilder_.setMessage(index, value);
+ }
+ return this;
+ }
+ /**
+ * repeated .IOSpecProto input_specs = 5;
+ */
+ public Builder setInputSpecs(
+ int index, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto.Builder builderForValue) {
+ if (inputSpecsBuilder_ == null) {
+ ensureInputSpecsIsMutable();
+ inputSpecs_.set(index, builderForValue.build());
+ onChanged();
+ } else {
+ inputSpecsBuilder_.setMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * repeated .IOSpecProto input_specs = 5;
+ */
+ public Builder addInputSpecs(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto value) {
+ if (inputSpecsBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureInputSpecsIsMutable();
+ inputSpecs_.add(value);
+ onChanged();
+ } else {
+ inputSpecsBuilder_.addMessage(value);
+ }
+ return this;
+ }
+ /**
+ * repeated .IOSpecProto input_specs = 5;
+ */
+ public Builder addInputSpecs(
+ int index, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto value) {
+ if (inputSpecsBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureInputSpecsIsMutable();
+ inputSpecs_.add(index, value);
+ onChanged();
+ } else {
+ inputSpecsBuilder_.addMessage(index, value);
+ }
+ return this;
+ }
+ /**
+ * repeated .IOSpecProto input_specs = 5;
+ */
+ public Builder addInputSpecs(
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto.Builder builderForValue) {
+ if (inputSpecsBuilder_ == null) {
+ ensureInputSpecsIsMutable();
+ inputSpecs_.add(builderForValue.build());
+ onChanged();
+ } else {
+ inputSpecsBuilder_.addMessage(builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * repeated .IOSpecProto input_specs = 5;
+ */
+ public Builder addInputSpecs(
+ int index, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto.Builder builderForValue) {
+ if (inputSpecsBuilder_ == null) {
+ ensureInputSpecsIsMutable();
+ inputSpecs_.add(index, builderForValue.build());
+ onChanged();
+ } else {
+ inputSpecsBuilder_.addMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * repeated .IOSpecProto input_specs = 5;
+ */
+ public Builder addAllInputSpecs(
+ java.lang.Iterable<? extends org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto> values) {
+ if (inputSpecsBuilder_ == null) {
+ ensureInputSpecsIsMutable();
+ super.addAll(values, inputSpecs_);
+ onChanged();
+ } else {
+ inputSpecsBuilder_.addAllMessages(values);
+ }
+ return this;
+ }
+ /**
+ * repeated .IOSpecProto input_specs = 5;
+ */
+ public Builder clearInputSpecs() {
+ if (inputSpecsBuilder_ == null) {
+ inputSpecs_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000010);
+ onChanged();
+ } else {
+ inputSpecsBuilder_.clear();
+ }
+ return this;
+ }
+ /**
+ * repeated .IOSpecProto input_specs = 5;
+ */
+ public Builder removeInputSpecs(int index) {
+ if (inputSpecsBuilder_ == null) {
+ ensureInputSpecsIsMutable();
+ inputSpecs_.remove(index);
+ onChanged();
+ } else {
+ inputSpecsBuilder_.remove(index);
+ }
+ return this;
+ }
+ /**
+ * repeated .IOSpecProto input_specs = 5;
+ */
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto.Builder getInputSpecsBuilder(
+ int index) {
+ return getInputSpecsFieldBuilder().getBuilder(index);
+ }
+ /**
+ * repeated .IOSpecProto input_specs = 5;
+ */
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProtoOrBuilder getInputSpecsOrBuilder(
+ int index) {
+ if (inputSpecsBuilder_ == null) {
+ return inputSpecs_.get(index); } else {
+ return inputSpecsBuilder_.getMessageOrBuilder(index);
+ }
+ }
+ /**
+ * repeated .IOSpecProto input_specs = 5;
+ */
+ public java.util.List<? extends org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProtoOrBuilder>
+ getInputSpecsOrBuilderList() {
+ if (inputSpecsBuilder_ != null) {
+ return inputSpecsBuilder_.getMessageOrBuilderList();
+ } else {
+ return java.util.Collections.unmodifiableList(inputSpecs_);
+ }
+ }
+ /**
+ * repeated .IOSpecProto input_specs = 5;
+ */
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto.Builder addInputSpecsBuilder() {
+ return getInputSpecsFieldBuilder().addBuilder(
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto.getDefaultInstance());
+ }
+ /**
+ * repeated .IOSpecProto input_specs = 5;
+ */
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto.Builder addInputSpecsBuilder(
+ int index) {
+ return getInputSpecsFieldBuilder().addBuilder(
+ index, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto.getDefaultInstance());
+ }
+ /**
+ * repeated .IOSpecProto input_specs = 5;
+ */
+ public java.util.List<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto.Builder>
+ getInputSpecsBuilderList() {
+ return getInputSpecsFieldBuilder().getBuilderList();
+ }
+ private com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProtoOrBuilder>
+ getInputSpecsFieldBuilder() {
+ if (inputSpecsBuilder_ == null) {
+ inputSpecsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProtoOrBuilder>(
+ inputSpecs_,
+ ((bitField0_ & 0x00000010) == 0x00000010),
+ getParentForChildren(),
+ isClean());
+ inputSpecs_ = null;
+ }
+ return inputSpecsBuilder_;
+ }
+
+ // repeated .IOSpecProto output_specs = 6;
+ private java.util.List<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto> outputSpecs_ =
+ java.util.Collections.emptyList();
+ private void ensureOutputSpecsIsMutable() {
+ if (!((bitField0_ & 0x00000020) == 0x00000020)) {
+ outputSpecs_ = new java.util.ArrayList<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto>(outputSpecs_);
+ bitField0_ |= 0x00000020;
+ }
+ }
+
+ private com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProtoOrBuilder> outputSpecsBuilder_;
+
+ /**
+ * repeated .IOSpecProto output_specs = 6;
+ */
+ public java.util.List<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto> getOutputSpecsList() {
+ if (outputSpecsBuilder_ == null) {
+ return java.util.Collections.unmodifiableList(outputSpecs_);
+ } else {
+ return outputSpecsBuilder_.getMessageList();
+ }
+ }
+ /**
+ * repeated .IOSpecProto output_specs = 6;
+ */
+ public int getOutputSpecsCount() {
+ if (outputSpecsBuilder_ == null) {
+ return outputSpecs_.size();
+ } else {
+ return outputSpecsBuilder_.getCount();
+ }
+ }
+ /**
+ * repeated .IOSpecProto output_specs = 6;
+ */
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto getOutputSpecs(int index) {
+ if (outputSpecsBuilder_ == null) {
+ return outputSpecs_.get(index);
+ } else {
+ return outputSpecsBuilder_.getMessage(index);
+ }
+ }
+ /**
+ * repeated .IOSpecProto output_specs = 6;
+ */
+ public Builder setOutputSpecs(
+ int index, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto value) {
+ if (outputSpecsBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureOutputSpecsIsMutable();
+ outputSpecs_.set(index, value);
+ onChanged();
+ } else {
+ outputSpecsBuilder_.setMessage(index, value);
+ }
+ return this;
+ }
+ /**
+ * repeated .IOSpecProto output_specs = 6;
+ */
+ public Builder setOutputSpecs(
+ int index, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto.Builder builderForValue) {
+ if (outputSpecsBuilder_ == null) {
+ ensureOutputSpecsIsMutable();
+ outputSpecs_.set(index, builderForValue.build());
+ onChanged();
+ } else {
+ outputSpecsBuilder_.setMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * repeated .IOSpecProto output_specs = 6;
+ */
+ public Builder addOutputSpecs(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto value) {
+ if (outputSpecsBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureOutputSpecsIsMutable();
+ outputSpecs_.add(value);
+ onChanged();
+ } else {
+ outputSpecsBuilder_.addMessage(value);
+ }
+ return this;
+ }
+ /**
+ * repeated .IOSpecProto output_specs = 6;
+ */
+ public Builder addOutputSpecs(
+ int index, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto value) {
+ if (outputSpecsBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureOutputSpecsIsMutable();
+ outputSpecs_.add(index, value);
+ onChanged();
+ } else {
+ outputSpecsBuilder_.addMessage(index, value);
+ }
+ return this;
+ }
+ /**
+ * repeated .IOSpecProto output_specs = 6;
+ */
+ public Builder addOutputSpecs(
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto.Builder builderForValue) {
+ if (outputSpecsBuilder_ == null) {
+ ensureOutputSpecsIsMutable();
+ outputSpecs_.add(builderForValue.build());
+ onChanged();
+ } else {
+ outputSpecsBuilder_.addMessage(builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * repeated .IOSpecProto output_specs = 6;
+ */
+ public Builder addOutputSpecs(
+ int index, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto.Builder builderForValue) {
+ if (outputSpecsBuilder_ == null) {
+ ensureOutputSpecsIsMutable();
+ outputSpecs_.add(index, builderForValue.build());
+ onChanged();
+ } else {
+ outputSpecsBuilder_.addMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * repeated .IOSpecProto output_specs = 6;
+ */
+ public Builder addAllOutputSpecs(
+ java.lang.Iterable<? extends org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto> values) {
+ if (outputSpecsBuilder_ == null) {
+ ensureOutputSpecsIsMutable();
+ super.addAll(values, outputSpecs_);
+ onChanged();
+ } else {
+ outputSpecsBuilder_.addAllMessages(values);
+ }
+ return this;
+ }
+ /**
+ * repeated .IOSpecProto output_specs = 6;
+ */
+ public Builder clearOutputSpecs() {
+ if (outputSpecsBuilder_ == null) {
+ outputSpecs_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000020);
+ onChanged();
+ } else {
+ outputSpecsBuilder_.clear();
+ }
+ return this;
+ }
+ /**
+ * repeated .IOSpecProto output_specs = 6;
+ */
+ public Builder removeOutputSpecs(int index) {
+ if (outputSpecsBuilder_ == null) {
+ ensureOutputSpecsIsMutable();
+ outputSpecs_.remove(index);
+ onChanged();
+ } else {
+ outputSpecsBuilder_.remove(index);
+ }
+ return this;
+ }
+ /**
+ * repeated .IOSpecProto output_specs = 6;
+ */
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto.Builder getOutputSpecsBuilder(
+ int index) {
+ return getOutputSpecsFieldBuilder().getBuilder(index);
+ }
+ /**
+ * repeated .IOSpecProto output_specs = 6;
+ */
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProtoOrBuilder getOutputSpecsOrBuilder(
+ int index) {
+ if (outputSpecsBuilder_ == null) {
+ return outputSpecs_.get(index); } else {
+ return outputSpecsBuilder_.getMessageOrBuilder(index);
+ }
+ }
+ /**
+ * repeated .IOSpecProto output_specs = 6;
+ */
+ public java.util.List<? extends org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProtoOrBuilder>
+ getOutputSpecsOrBuilderList() {
+ if (outputSpecsBuilder_ != null) {
+ return outputSpecsBuilder_.getMessageOrBuilderList();
+ } else {
+ return java.util.Collections.unmodifiableList(outputSpecs_);
+ }
+ }
+ /**
+ * repeated .IOSpecProto output_specs = 6;
+ */
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto.Builder addOutputSpecsBuilder() {
+ return getOutputSpecsFieldBuilder().addBuilder(
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto.getDefaultInstance());
+ }
+ /**
+ * repeated .IOSpecProto output_specs = 6;
+ */
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto.Builder addOutputSpecsBuilder(
+ int index) {
+ return getOutputSpecsFieldBuilder().addBuilder(
+ index, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto.getDefaultInstance());
+ }
+ /**
+ * repeated .IOSpecProto output_specs = 6;
+ */
+ public java.util.List<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto.Builder>
+ getOutputSpecsBuilderList() {
+ return getOutputSpecsFieldBuilder().getBuilderList();
+ }
+ private com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProtoOrBuilder>
+ getOutputSpecsFieldBuilder() {
+ if (outputSpecsBuilder_ == null) {
+ outputSpecsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProtoOrBuilder>(
+ outputSpecs_,
+ ((bitField0_ & 0x00000020) == 0x00000020),
+ getParentForChildren(),
+ isClean());
+ outputSpecs_ = null;
+ }
+ return outputSpecsBuilder_;
+ }
+
+ // repeated .GroupInputSpecProto grouped_input_specs = 7;
+ private java.util.List<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto> groupedInputSpecs_ =
+ java.util.Collections.emptyList();
+ private void ensureGroupedInputSpecsIsMutable() {
+ if (!((bitField0_ & 0x00000040) == 0x00000040)) {
+ groupedInputSpecs_ = new java.util.ArrayList<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto>(groupedInputSpecs_);
+ bitField0_ |= 0x00000040;
+ }
+ }
+
+ private com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProtoOrBuilder> groupedInputSpecsBuilder_;
+
+ /**
+ * repeated .GroupInputSpecProto grouped_input_specs = 7;
+ */
+ public java.util.List<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto> getGroupedInputSpecsList() {
+ if (groupedInputSpecsBuilder_ == null) {
+ return java.util.Collections.unmodifiableList(groupedInputSpecs_);
+ } else {
+ return groupedInputSpecsBuilder_.getMessageList();
+ }
+ }
+ /**
+ * repeated .GroupInputSpecProto grouped_input_specs = 7;
+ */
+ public int getGroupedInputSpecsCount() {
+ if (groupedInputSpecsBuilder_ == null) {
+ return groupedInputSpecs_.size();
+ } else {
+ return groupedInputSpecsBuilder_.getCount();
+ }
+ }
+ /**
+ * repeated .GroupInputSpecProto grouped_input_specs = 7;
+ */
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto getGroupedInputSpecs(int index) {
+ if (groupedInputSpecsBuilder_ == null) {
+ return groupedInputSpecs_.get(index);
+ } else {
+ return groupedInputSpecsBuilder_.getMessage(index);
+ }
+ }
+ /**
+ * repeated .GroupInputSpecProto grouped_input_specs = 7;
+ */
+ public Builder setGroupedInputSpecs(
+ int index, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto value) {
+ if (groupedInputSpecsBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureGroupedInputSpecsIsMutable();
+ groupedInputSpecs_.set(index, value);
+ onChanged();
+ } else {
+ groupedInputSpecsBuilder_.setMessage(index, value);
+ }
+ return this;
+ }
+ /**
+ * repeated .GroupInputSpecProto grouped_input_specs = 7;
+ */
+ public Builder setGroupedInputSpecs(
+ int index, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto.Builder builderForValue) {
+ if (groupedInputSpecsBuilder_ == null) {
+ ensureGroupedInputSpecsIsMutable();
+ groupedInputSpecs_.set(index, builderForValue.build());
+ onChanged();
+ } else {
+ groupedInputSpecsBuilder_.setMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * repeated .GroupInputSpecProto grouped_input_specs = 7;
+ */
+ public Builder addGroupedInputSpecs(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto value) {
+ if (groupedInputSpecsBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureGroupedInputSpecsIsMutable();
+ groupedInputSpecs_.add(value);
+ onChanged();
+ } else {
+ groupedInputSpecsBuilder_.addMessage(value);
+ }
+ return this;
+ }
+ /**
+ * repeated .GroupInputSpecProto grouped_input_specs = 7;
+ */
+ public Builder addGroupedInputSpecs(
+ int index, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto value) {
+ if (groupedInputSpecsBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureGroupedInputSpecsIsMutable();
+ groupedInputSpecs_.add(index, value);
+ onChanged();
+ } else {
+ groupedInputSpecsBuilder_.addMessage(index, value);
+ }
+ return this;
+ }
+ /**
+ * repeated .GroupInputSpecProto grouped_input_specs = 7;
+ */
+ public Builder addGroupedInputSpecs(
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto.Builder builderForValue) {
+ if (groupedInputSpecsBuilder_ == null) {
+ ensureGroupedInputSpecsIsMutable();
+ groupedInputSpecs_.add(builderForValue.build());
+ onChanged();
+ } else {
+ groupedInputSpecsBuilder_.addMessage(builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * repeated .GroupInputSpecProto grouped_input_specs = 7;
+ */
+ public Builder addGroupedInputSpecs(
+ int index, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto.Builder builderForValue) {
+ if (groupedInputSpecsBuilder_ == null) {
+ ensureGroupedInputSpecsIsMutable();
+ groupedInputSpecs_.add(index, builderForValue.build());
+ onChanged();
+ } else {
+ groupedInputSpecsBuilder_.addMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * repeated .GroupInputSpecProto grouped_input_specs = 7;
+ */
+ public Builder addAllGroupedInputSpecs(
+ java.lang.Iterable<? extends org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto> values) {
+ if (groupedInputSpecsBuilder_ == null) {
+ ensureGroupedInputSpecsIsMutable();
+ super.addAll(values, groupedInputSpecs_);
+ onChanged();
+ } else {
+ groupedInputSpecsBuilder_.addAllMessages(values);
+ }
+ return this;
+ }
+ /**
+ * repeated .GroupInputSpecProto grouped_input_specs = 7;
+ */
+ public Builder clearGroupedInputSpecs() {
+ if (groupedInputSpecsBuilder_ == null) {
+ groupedInputSpecs_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000040);
+ onChanged();
+ } else {
+ groupedInputSpecsBuilder_.clear();
+ }
+ return this;
+ }
+ /**
+ * repeated .GroupInputSpecProto grouped_input_specs = 7;
+ */
+ public Builder removeGroupedInputSpecs(int index) {
+ if (groupedInputSpecsBuilder_ == null) {
+ ensureGroupedInputSpecsIsMutable();
+ groupedInputSpecs_.remove(index);
+ onChanged();
+ } else {
+ groupedInputSpecsBuilder_.remove(index);
+ }
+ return this;
+ }
+ /**
+ * repeated .GroupInputSpecProto grouped_input_specs = 7;
+ */
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto.Builder getGroupedInputSpecsBuilder(
+ int index) {
+ return getGroupedInputSpecsFieldBuilder().getBuilder(index);
+ }
+ /**
+ * repeated .GroupInputSpecProto grouped_input_specs = 7;
+ */
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProtoOrBuilder getGroupedInputSpecsOrBuilder(
+ int index) {
+ if (groupedInputSpecsBuilder_ == null) {
+ return groupedInputSpecs_.get(index); } else {
+ return groupedInputSpecsBuilder_.getMessageOrBuilder(index);
+ }
+ }
+ /**
+ * repeated .GroupInputSpecProto grouped_input_specs = 7;
+ */
+ public java.util.List<? extends org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProtoOrBuilder>
+ getGroupedInputSpecsOrBuilderList() {
+ if (groupedInputSpecsBuilder_ != null) {
+ return groupedInputSpecsBuilder_.getMessageOrBuilderList();
+ } else {
+ return java.util.Collections.unmodifiableList(groupedInputSpecs_);
+ }
+ }
+ /**
+ * repeated .GroupInputSpecProto grouped_input_specs = 7;
+ */
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto.Builder addGroupedInputSpecsBuilder() {
+ return getGroupedInputSpecsFieldBuilder().addBuilder(
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto.getDefaultInstance());
+ }
+ /**
+ * repeated .GroupInputSpecProto grouped_input_specs = 7;
+ */
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto.Builder addGroupedInputSpecsBuilder(
+ int index) {
+ return getGroupedInputSpecsFieldBuilder().addBuilder(
+ index, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto.getDefaultInstance());
+ }
+ /**
+ * repeated .GroupInputSpecProto grouped_input_specs = 7;
+ */
+ public java.util.List<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto.Builder>
+ getGroupedInputSpecsBuilderList() {
+ return getGroupedInputSpecsFieldBuilder().getBuilderList();
+ }
+ private com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProtoOrBuilder>
+ getGroupedInputSpecsFieldBuilder() {
+ if (groupedInputSpecsBuilder_ == null) {
+ groupedInputSpecsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProtoOrBuilder>(
+ groupedInputSpecs_,
+ ((bitField0_ & 0x00000040) == 0x00000040),
+ getParentForChildren(),
+ isClean());
+ groupedInputSpecs_ = null;
+ }
+ return groupedInputSpecsBuilder_;
+ }
+
+ // optional int32 vertex_parallelism = 8;
+ private int vertexParallelism_ ;
+ /**
+ * optional int32 vertex_parallelism = 8;
+ */
+ public boolean hasVertexParallelism() {
+ return ((bitField0_ & 0x00000080) == 0x00000080);
+ }
+ /**
+ * optional int32 vertex_parallelism = 8;
+ */
+ public int getVertexParallelism() {
+ return vertexParallelism_;
+ }
+ /**
+ * optional int32 vertex_parallelism = 8;
+ */
+ public Builder setVertexParallelism(int value) {
+ bitField0_ |= 0x00000080;
+ vertexParallelism_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * optional int32 vertex_parallelism = 8;
+ */
+ public Builder clearVertexParallelism() {
+ bitField0_ = (bitField0_ & ~0x00000080);
+ vertexParallelism_ = 0;
+ onChanged();
+ return this;
+ }
+
+ // optional int32 fragment_number = 9;
+ private int fragmentNumber_ ;
+ /**
+ * optional int32 fragment_number = 9;
+ */
+ public boolean hasFragmentNumber() {
+ return ((bitField0_ & 0x00000100) == 0x00000100);
+ }
+ /**
+ * optional int32 fragment_number = 9;
+ */
+ public int getFragmentNumber() {
+ return fragmentNumber_;
+ }
+ /**
+ * optional int32 fragment_number = 9;
+ */
+ public Builder setFragmentNumber(int value) {
+ bitField0_ |= 0x00000100;
+ fragmentNumber_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * optional int32 fragment_number = 9;
+ */
+ public Builder clearFragmentNumber() {
+ bitField0_ = (bitField0_ & ~0x00000100);
+ fragmentNumber_ = 0;
+ onChanged();
+ return this;
+ }
+
+ // optional int32 attempt_number = 10;
+ private int attemptNumber_ ;
+ /**
+ * optional int32 attempt_number = 10;
+ */
+ public boolean hasAttemptNumber() {
+ return ((bitField0_ & 0x00000200) == 0x00000200);
+ }
+ /**
+ * optional int32 attempt_number = 10;
+ */
+ public int getAttemptNumber() {
+ return attemptNumber_;
+ }
+ /**
+ * optional int32 attempt_number = 10;
+ */
+ public Builder setAttemptNumber(int value) {
+ bitField0_ |= 0x00000200;
+ attemptNumber_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * optional int32 attempt_number = 10;
+ */
+ public Builder clearAttemptNumber() {
+ bitField0_ = (bitField0_ & ~0x00000200);
+ attemptNumber_ = 0;
+ onChanged();
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:FragmentSpecProto)
+ }
+
+ static {
+ defaultInstance = new FragmentSpecProto(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:FragmentSpecProto)
+ }
+
+ public interface SubmitWorkRequestProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// optional string container_id_string = 1;
@@ -115,26 +5560,40 @@ public static void registerAllExtensions(
* optional int32 app_attempt_number = 8;
*/
int getAppAttemptNumber();
+
+ // optional .FragmentSpecProto fragment_spec = 9;
+ /**
+ * optional .FragmentSpecProto fragment_spec = 9;
+ */
+ boolean hasFragmentSpec();
+ /**
+ * optional .FragmentSpecProto fragment_spec = 9;
+ */
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentSpecProto getFragmentSpec();
+ /**
+ * optional .FragmentSpecProto fragment_spec = 9;
+ */
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentSpecProtoOrBuilder getFragmentSpecOrBuilder();
}
/**
- * Protobuf type {@code RunContainerRequestProto}
+ * Protobuf type {@code SubmitWorkRequestProto}
*/
- public static final class RunContainerRequestProto extends
+ public static final class SubmitWorkRequestProto extends
com.google.protobuf.GeneratedMessage
- implements RunContainerRequestProtoOrBuilder {
- // Use RunContainerRequestProto.newBuilder() to construct.
- private RunContainerRequestProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ implements SubmitWorkRequestProtoOrBuilder {
+ // Use SubmitWorkRequestProto.newBuilder() to construct.
+ private SubmitWorkRequestProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
- private RunContainerRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+ private SubmitWorkRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
- private static final RunContainerRequestProto defaultInstance;
- public static RunContainerRequestProto getDefaultInstance() {
+ private static final SubmitWorkRequestProto defaultInstance;
+ public static SubmitWorkRequestProto getDefaultInstance() {
return defaultInstance;
}
- public RunContainerRequestProto getDefaultInstanceForType() {
+ public SubmitWorkRequestProto getDefaultInstanceForType() {
return defaultInstance;
}
@@ -144,7 +5603,7 @@ public RunContainerRequestProto getDefaultInstanceForType() {
getUnknownFields() {
return this.unknownFields;
}
- private RunContainerRequestProto(
+ private SubmitWorkRequestProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
@@ -207,6 +5666,19 @@ private RunContainerRequestProto(
appAttemptNumber_ = input.readInt32();
break;
}
+ case 74: {
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentSpecProto.Builder subBuilder = null;
+ if (((bitField0_ & 0x00000100) == 0x00000100)) {
+ subBuilder = fragmentSpec_.toBuilder();
+ }
+ fragmentSpec_ = input.readMessage(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentSpecProto.PARSER, extensionRegistry);
+ if (subBuilder != null) {
+ subBuilder.mergeFrom(fragmentSpec_);
+ fragmentSpec_ = subBuilder.buildPartial();
+ }
+ bitField0_ |= 0x00000100;
+ break;
+ }
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
@@ -221,28 +5693,28 @@ private RunContainerRequestProto(
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
- return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_RunContainerRequestProto_descriptor;
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_SubmitWorkRequestProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
- return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_RunContainerRequestProto_fieldAccessorTable
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_SubmitWorkRequestProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
- org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerRequestProto.class, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerRequestProto.Builder.class);
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto.class, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto.Builder.class);
}
- public static com.google.protobuf.Parser<RunContainerRequestProto> PARSER =
- new com.google.protobuf.AbstractParser<RunContainerRequestProto>() {
- public RunContainerRequestProto parsePartialFrom(
+ public static com.google.protobuf.Parser<SubmitWorkRequestProto> PARSER =
+ new com.google.protobuf.AbstractParser<SubmitWorkRequestProto>() {
+ public SubmitWorkRequestProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
- return new RunContainerRequestProto(input, extensionRegistry);
+ return new SubmitWorkRequestProto(input, extensionRegistry);
}
};
@java.lang.Override
- public com.google.protobuf.Parser<RunContainerRequestProto> getParserForType() {
+ public com.google.protobuf.Parser<SubmitWorkRequestProto> getParserForType() {
return PARSER;
}
@@ -510,6 +5982,28 @@ public int getAppAttemptNumber() {
return appAttemptNumber_;
}
+ // optional .FragmentSpecProto fragment_spec = 9;
+ public static final int FRAGMENT_SPEC_FIELD_NUMBER = 9;
+ private org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentSpecProto fragmentSpec_;
+ /**
+ * optional .FragmentSpecProto fragment_spec = 9;
+ */
+ public boolean hasFragmentSpec() {
+ return ((bitField0_ & 0x00000100) == 0x00000100);
+ }
+ /**
+ * optional .FragmentSpecProto fragment_spec = 9;
+ */
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentSpecProto getFragmentSpec() {
+ return fragmentSpec_;
+ }
+ /**
+ * optional .FragmentSpecProto fragment_spec = 9;
+ */
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentSpecProtoOrBuilder getFragmentSpecOrBuilder() {
+ return fragmentSpec_;
+ }
+
private void initFields() {
containerIdString_ = "";
amHost_ = "";
@@ -519,6 +6013,7 @@ private void initFields() {
user_ = "";
applicationIdString_ = "";
appAttemptNumber_ = 0;
+ fragmentSpec_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentSpecProto.getDefaultInstance();
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
@@ -556,6 +6051,9 @@ public void writeTo(com.google.protobuf.CodedOutputStream output)
if (((bitField0_ & 0x00000080) == 0x00000080)) {
output.writeInt32(8, appAttemptNumber_);
}
+ if (((bitField0_ & 0x00000100) == 0x00000100)) {
+ output.writeMessage(9, fragmentSpec_);
+ }
getUnknownFields().writeTo(output);
}
@@ -597,6 +6095,10 @@ public int getSerializedSize() {
size += com.google.protobuf.CodedOutputStream
.computeInt32Size(8, appAttemptNumber_);
}
+ if (((bitField0_ & 0x00000100) == 0x00000100)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(9, fragmentSpec_);
+ }
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
@@ -614,10 +6116,10 @@ public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
- if (!(obj instanceof org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerRequestProto)) {
+ if (!(obj instanceof org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto)) {
return super.equals(obj);
}
- org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerRequestProto other = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerRequestProto) obj;
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto other = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto) obj;
boolean result = true;
result = result && (hasContainerIdString() == other.hasContainerIdString());
@@ -660,6 +6162,11 @@ public boolean equals(final java.lang.Object obj) {
result = result && (getAppAttemptNumber()
== other.getAppAttemptNumber());
}
+ result = result && (hasFragmentSpec() == other.hasFragmentSpec());
+ if (hasFragmentSpec()) {
+ result = result && getFragmentSpec()
+ .equals(other.getFragmentSpec());
+ }
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
@@ -705,58 +6212,62 @@ public int hashCode() {
hash = (37 * hash) + APP_ATTEMPT_NUMBER_FIELD_NUMBER;
hash = (53 * hash) + getAppAttemptNumber();
}
+ if (hasFragmentSpec()) {
+ hash = (37 * hash) + FRAGMENT_SPEC_FIELD_NUMBER;
+ hash = (53 * hash) + getFragmentSpec().hashCode();
+ }
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
- public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerRequestProto parseFrom(
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
- public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerRequestProto parseFrom(
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
- public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerRequestProto parseFrom(byte[] data)
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
- public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerRequestProto parseFrom(
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
- public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerRequestProto parseFrom(java.io.InputStream input)
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
- public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerRequestProto parseFrom(
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
- public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerRequestProto parseDelimitedFrom(java.io.InputStream input)
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
- public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerRequestProto parseDelimitedFrom(
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
- public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerRequestProto parseFrom(
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
- public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerRequestProto parseFrom(
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
@@ -765,7 +6276,7 @@ public int hashCode() {
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
- public static Builder newBuilder(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerRequestProto prototype) {
+ public static Builder newBuilder(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@@ -777,24 +6288,24 @@ protected Builder newBuilderForType(
return builder;
}
/**
- * Protobuf type {@code RunContainerRequestProto}
+ * Protobuf type {@code SubmitWorkRequestProto}
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
- implements org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerRequestProtoOrBuilder {
+ implements org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
- return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_RunContainerRequestProto_descriptor;
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_SubmitWorkRequestProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
- return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_RunContainerRequestProto_fieldAccessorTable
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_SubmitWorkRequestProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
- org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerRequestProto.class, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerRequestProto.Builder.class);
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto.class, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto.Builder.class);
}
- // Construct using org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerRequestProto.newBuilder()
+ // Construct using org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
@@ -806,6 +6317,7 @@ private Builder(
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ getFragmentSpecFieldBuilder();
}
}
private static Builder create() {
@@ -830,6 +6342,12 @@ public Builder clear() {
bitField0_ = (bitField0_ & ~0x00000040);
appAttemptNumber_ = 0;
bitField0_ = (bitField0_ & ~0x00000080);
+ if (fragmentSpecBuilder_ == null) {
+ fragmentSpec_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentSpecProto.getDefaultInstance();
+ } else {
+ fragmentSpecBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000100);
return this;
}
@@ -839,23 +6357,23 @@ public Builder clone() {
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
- return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_RunContainerRequestProto_descriptor;
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_SubmitWorkRequestProto_descriptor;
}
- public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerRequestProto getDefaultInstanceForType() {
- return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerRequestProto.getDefaultInstance();
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto getDefaultInstanceForType() {
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto.getDefaultInstance();
}
- public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerRequestProto build() {
- org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerRequestProto result = buildPartial();
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto build() {
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
- public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerRequestProto buildPartial() {
- org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerRequestProto result = new org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerRequestProto(this);
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto buildPartial() {
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto result = new org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
@@ -890,22 +6408,30 @@ public Builder clone() {
to_bitField0_ |= 0x00000080;
}
result.appAttemptNumber_ = appAttemptNumber_;
+ if (((from_bitField0_ & 0x00000100) == 0x00000100)) {
+ to_bitField0_ |= 0x00000100;
+ }
+ if (fragmentSpecBuilder_ == null) {
+ result.fragmentSpec_ = fragmentSpec_;
+ } else {
+ result.fragmentSpec_ = fragmentSpecBuilder_.build();
+ }
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
- if (other instanceof org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerRequestProto) {
- return mergeFrom((org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerRequestProto)other);
+ if (other instanceof org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto) {
+ return mergeFrom((org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
- public Builder mergeFrom(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerRequestProto other) {
- if (other == org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerRequestProto.getDefaultInstance()) return this;
+ public Builder mergeFrom(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto other) {
+ if (other == org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto.getDefaultInstance()) return this;
if (other.hasContainerIdString()) {
bitField0_ |= 0x00000001;
containerIdString_ = other.containerIdString_;
@@ -940,6 +6466,9 @@ public Builder mergeFrom(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtoc
if (other.hasAppAttemptNumber()) {
setAppAttemptNumber(other.getAppAttemptNumber());
}
+ if (other.hasFragmentSpec()) {
+ mergeFragmentSpec(other.getFragmentSpec());
+ }
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
@@ -952,11 +6481,11 @@ public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
- org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerRequestProto parsedMessage = null;
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
- parsedMessage = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerRequestProto) e.getUnfinishedMessage();
+ parsedMessage = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
@@ -1439,39 +6968,156 @@ public Builder clearAppAttemptNumber() {
return this;
}
- // @@protoc_insertion_point(builder_scope:RunContainerRequestProto)
+ // optional .FragmentSpecProto fragment_spec = 9;
+ private org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentSpecProto fragmentSpec_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentSpecProto.getDefaultInstance();
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentSpecProto, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentSpecProto.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentSpecProtoOrBuilder> fragmentSpecBuilder_;
+ /**
+ * optional .FragmentSpecProto fragment_spec = 9;
+ */
+ public boolean hasFragmentSpec() {
+ return ((bitField0_ & 0x00000100) == 0x00000100);
+ }
+ /**
+ * optional .FragmentSpecProto fragment_spec = 9;
+ */
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentSpecProto getFragmentSpec() {
+ if (fragmentSpecBuilder_ == null) {
+ return fragmentSpec_;
+ } else {
+ return fragmentSpecBuilder_.getMessage();
+ }
+ }
+ /**
+ * optional .FragmentSpecProto fragment_spec = 9;
+ */
+ public Builder setFragmentSpec(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentSpecProto value) {
+ if (fragmentSpecBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ fragmentSpec_ = value;
+ onChanged();
+ } else {
+ fragmentSpecBuilder_.setMessage(value);
+ }
+ bitField0_ |= 0x00000100;
+ return this;
+ }
+ /**
+ * optional .FragmentSpecProto fragment_spec = 9;
+ */
+ public Builder setFragmentSpec(
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentSpecProto.Builder builderForValue) {
+ if (fragmentSpecBuilder_ == null) {
+ fragmentSpec_ = builderForValue.build();
+ onChanged();
+ } else {
+ fragmentSpecBuilder_.setMessage(builderForValue.build());
+ }
+ bitField0_ |= 0x00000100;
+ return this;
+ }
+ /**
+ * optional .FragmentSpecProto fragment_spec = 9;
+ */
+ public Builder mergeFragmentSpec(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentSpecProto value) {
+ if (fragmentSpecBuilder_ == null) {
+ if (((bitField0_ & 0x00000100) == 0x00000100) &&
+ fragmentSpec_ != org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentSpecProto.getDefaultInstance()) {
+ fragmentSpec_ =
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentSpecProto.newBuilder(fragmentSpec_).mergeFrom(value).buildPartial();
+ } else {
+ fragmentSpec_ = value;
+ }
+ onChanged();
+ } else {
+ fragmentSpecBuilder_.mergeFrom(value);
+ }
+ bitField0_ |= 0x00000100;
+ return this;
+ }
+ /**
+ * optional .FragmentSpecProto fragment_spec = 9;
+ */
+ public Builder clearFragmentSpec() {
+ if (fragmentSpecBuilder_ == null) {
+ fragmentSpec_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentSpecProto.getDefaultInstance();
+ onChanged();
+ } else {
+ fragmentSpecBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000100);
+ return this;
+ }
+ /**
+ * optional .FragmentSpecProto fragment_spec = 9;
+ */
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentSpecProto.Builder getFragmentSpecBuilder() {
+ bitField0_ |= 0x00000100;
+ onChanged();
+ return getFragmentSpecFieldBuilder().getBuilder();
+ }
+ /**
+ * optional .FragmentSpecProto fragment_spec = 9;
+ */
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentSpecProtoOrBuilder getFragmentSpecOrBuilder() {
+ if (fragmentSpecBuilder_ != null) {
+ return fragmentSpecBuilder_.getMessageOrBuilder();
+ } else {
+ return fragmentSpec_;
+ }
+ }
+ /**
+ * optional .FragmentSpecProto fragment_spec = 9;
+ */
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentSpecProto, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentSpecProto.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentSpecProtoOrBuilder>
+ getFragmentSpecFieldBuilder() {
+ if (fragmentSpecBuilder_ == null) {
+ fragmentSpecBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentSpecProto, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentSpecProto.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentSpecProtoOrBuilder>(
+ fragmentSpec_,
+ getParentForChildren(),
+ isClean());
+ fragmentSpec_ = null;
+ }
+ return fragmentSpecBuilder_;
+ }
+
+ // @@protoc_insertion_point(builder_scope:SubmitWorkRequestProto)
}
static {
- defaultInstance = new RunContainerRequestProto(true);
+ defaultInstance = new SubmitWorkRequestProto(true);
defaultInstance.initFields();
}
- // @@protoc_insertion_point(class_scope:RunContainerRequestProto)
+ // @@protoc_insertion_point(class_scope:SubmitWorkRequestProto)
}
- public interface RunContainerResponseProtoOrBuilder
+ public interface SubmitWorkResponseProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
}
/**
- * Protobuf type {@code RunContainerResponseProto}
+ * Protobuf type {@code SubmitWorkResponseProto}
*/
- public static final class RunContainerResponseProto extends
+ public static final class SubmitWorkResponseProto extends
com.google.protobuf.GeneratedMessage
- implements RunContainerResponseProtoOrBuilder {
- // Use RunContainerResponseProto.newBuilder() to construct.
- private RunContainerResponseProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ implements SubmitWorkResponseProtoOrBuilder {
+ // Use SubmitWorkResponseProto.newBuilder() to construct.
+ private SubmitWorkResponseProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
- private RunContainerResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+ private SubmitWorkResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
- private static final RunContainerResponseProto defaultInstance;
- public static RunContainerResponseProto getDefaultInstance() {
+ private static final SubmitWorkResponseProto defaultInstance;
+ public static SubmitWorkResponseProto getDefaultInstance() {
return defaultInstance;
}
- public RunContainerResponseProto getDefaultInstanceForType() {
+ public SubmitWorkResponseProto getDefaultInstanceForType() {
return defaultInstance;
}
@@ -1481,7 +7127,7 @@ public RunContainerResponseProto getDefaultInstanceForType() {
getUnknownFields() {
return this.unknownFields;
}
- private RunContainerResponseProto(
+ private SubmitWorkResponseProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
@@ -1517,28 +7163,28 @@ private RunContainerResponseProto(
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
- return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_RunContainerResponseProto_descriptor;
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_SubmitWorkResponseProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
- return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_RunContainerResponseProto_fieldAccessorTable
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_SubmitWorkResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
- org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerResponseProto.class, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerResponseProto.Builder.class);
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkResponseProto.class, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkResponseProto.Builder.class);
}
- public static com.google.protobuf.Parser<RunContainerResponseProto> PARSER =
- new com.google.protobuf.AbstractParser<RunContainerResponseProto>() {
- public RunContainerResponseProto parsePartialFrom(
+ public static com.google.protobuf.Parser<SubmitWorkResponseProto> PARSER =
+ new com.google.protobuf.AbstractParser<SubmitWorkResponseProto>() {
+ public SubmitWorkResponseProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
- return new RunContainerResponseProto(input, extensionRegistry);
+ return new SubmitWorkResponseProto(input, extensionRegistry);
}
};
@java.lang.Override
- public com.google.protobuf.Parser<RunContainerResponseProto> getParserForType() {
+ public com.google.protobuf.Parser<SubmitWorkResponseProto> getParserForType() {
return PARSER;
}
@@ -1582,10 +7228,10 @@ public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
- if (!(obj instanceof org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerResponseProto)) {
+ if (!(obj instanceof org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkResponseProto)) {
return super.equals(obj);
}
- org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerResponseProto other = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerResponseProto) obj;
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkResponseProto other = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkResponseProto) obj;
boolean result = true;
result = result &&
@@ -1606,53 +7252,53 @@ public int hashCode() {
return hash;
}
- public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerResponseProto parseFrom(
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkResponseProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
- public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerResponseProto parseFrom(
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkResponseProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
- public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerResponseProto parseFrom(byte[] data)
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkResponseProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
- public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerResponseProto parseFrom(
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkResponseProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
- public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerResponseProto parseFrom(java.io.InputStream input)
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkResponseProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
- public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerResponseProto parseFrom(
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkResponseProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
- public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerResponseProto parseDelimitedFrom(java.io.InputStream input)
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkResponseProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
- public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerResponseProto parseDelimitedFrom(
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkResponseProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
- public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerResponseProto parseFrom(
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkResponseProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
- public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerResponseProto parseFrom(
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkResponseProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
@@ -1661,7 +7307,7 @@ public int hashCode() {
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
- public static Builder newBuilder(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerResponseProto prototype) {
+ public static Builder newBuilder(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkResponseProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@@ -1673,24 +7319,24 @@ protected Builder newBuilderForType(
return builder;
}
/**
- * Protobuf type {@code RunContainerResponseProto}
+ * Protobuf type {@code SubmitWorkResponseProto}
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
- implements org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerResponseProtoOrBuilder {
+ implements org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkResponseProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
- return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_RunContainerResponseProto_descriptor;
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_SubmitWorkResponseProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
- return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_RunContainerResponseProto_fieldAccessorTable
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_SubmitWorkResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
- org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerResponseProto.class, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerResponseProto.Builder.class);
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkResponseProto.class, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkResponseProto.Builder.class);
}
- // Construct using org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerResponseProto.newBuilder()
+ // Construct using org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkResponseProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
@@ -1719,38 +7365,38 @@ public Builder clone() {
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
- return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_RunContainerResponseProto_descriptor;
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_SubmitWorkResponseProto_descriptor;
}
- public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerResponseProto getDefaultInstanceForType() {
- return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerResponseProto.getDefaultInstance();
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkResponseProto getDefaultInstanceForType() {
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkResponseProto.getDefaultInstance();
}
- public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerResponseProto build() {
- org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerResponseProto result = buildPartial();
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkResponseProto build() {
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkResponseProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
- public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerResponseProto buildPartial() {
- org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerResponseProto result = new org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerResponseProto(this);
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkResponseProto buildPartial() {
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkResponseProto result = new org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkResponseProto(this);
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
- if (other instanceof org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerResponseProto) {
- return mergeFrom((org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerResponseProto)other);
+ if (other instanceof org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkResponseProto) {
+ return mergeFrom((org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkResponseProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
- public Builder mergeFrom(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerResponseProto other) {
- if (other == org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerResponseProto.getDefaultInstance()) return this;
+ public Builder mergeFrom(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkResponseProto other) {
+ if (other == org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkResponseProto.getDefaultInstance()) return this;
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
@@ -1763,11 +7409,11 @@ public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
- org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerResponseProto parsedMessage = null;
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkResponseProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
- parsedMessage = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerResponseProto) e.getUnfinishedMessage();
+ parsedMessage = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkResponseProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
@@ -1777,15 +7423,15 @@ public Builder mergeFrom(
return this;
}
- // @@protoc_insertion_point(builder_scope:RunContainerResponseProto)
+ // @@protoc_insertion_point(builder_scope:SubmitWorkResponseProto)
}
static {
- defaultInstance = new RunContainerResponseProto(true);
+ defaultInstance = new SubmitWorkResponseProto(true);
defaultInstance.initFields();
}
- // @@protoc_insertion_point(class_scope:RunContainerResponseProto)
+ // @@protoc_insertion_point(class_scope:SubmitWorkResponseProto)
}
/**
@@ -1797,12 +7443,12 @@ protected LlapDaemonProtocol() {}
public interface Interface {
/**
- * rpc runContainer(.RunContainerRequestProto) returns (.RunContainerResponseProto);
+ * rpc submitWork(.SubmitWorkRequestProto) returns (.SubmitWorkResponseProto);
*/
- public abstract void runContainer(
+ public abstract void submitWork(
com.google.protobuf.RpcController controller,
- org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerRequestProto request,
- com.google.protobuf.RpcCallback<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerResponseProto> done);
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto request,
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkResponseProto> done);
}
@@ -1810,11 +7456,11 @@ public abstract void runContainer(
final Interface impl) {
return new LlapDaemonProtocol() {
@java.lang.Override
- public void runContainer(
+ public void submitWork(
com.google.protobuf.RpcController controller,
- org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerRequestProto request,
- com.google.protobuf.RpcCallback<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerResponseProto> done) {
- impl.runContainer(controller, request, done);
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto request,
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkResponseProto> done) {
+ impl.submitWork(controller, request, done);
}
};
@@ -1840,7 +7486,7 @@ public void runContainer(
}
switch(method.getIndex()) {
case 0:
- return impl.runContainer(controller, (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerRequestProto)request);
+ return impl.submitWork(controller, (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto)request);
default:
throw new java.lang.AssertionError("Can't get here.");
}
@@ -1856,7 +7502,7 @@ public void runContainer(
}
switch(method.getIndex()) {
case 0:
- return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerRequestProto.getDefaultInstance();
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
}
@@ -1872,7 +7518,7 @@ public void runContainer(
}
switch(method.getIndex()) {
case 0:
- return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerResponseProto.getDefaultInstance();
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkResponseProto.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
}
@@ -1882,12 +7528,12 @@ public void runContainer(
}
/**
- * rpc runContainer(.RunContainerRequestProto) returns (.RunContainerResponseProto);
+ * rpc submitWork(.SubmitWorkRequestProto) returns (.SubmitWorkResponseProto);
*/
- public abstract void runContainer(
+ public abstract void submitWork(
com.google.protobuf.RpcController controller,
- org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerRequestProto request,
- com.google.protobuf.RpcCallback<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerResponseProto> done);
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto request,
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkResponseProto> done);
public static final
com.google.protobuf.Descriptors.ServiceDescriptor
@@ -1912,8 +7558,8 @@ public final void callMethod(
}
switch(method.getIndex()) {
case 0:
- this.runContainer(controller, (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerRequestProto)request,
- com.google.protobuf.RpcUtil.<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerResponseProto>specializeCallback(
+ this.submitWork(controller, (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto)request,
+ com.google.protobuf.RpcUtil.<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkResponseProto>specializeCallback(
done));
return;
default:
@@ -1931,7 +7577,7 @@ public final void callMethod(
}
switch(method.getIndex()) {
case 0:
- return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerRequestProto.getDefaultInstance();
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
}
@@ -1947,7 +7593,7 @@ public final void callMethod(
}
switch(method.getIndex()) {
case 0:
- return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerResponseProto.getDefaultInstance();
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkResponseProto.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
}
@@ -1969,19 +7615,19 @@ private Stub(com.google.protobuf.RpcChannel channel) {
return channel;
}
- public void runContainer(
+ public void submitWork(
com.google.protobuf.RpcController controller,
- org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerRequestProto request,
- com.google.protobuf.RpcCallback<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerResponseProto> done) {
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto request,
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkResponseProto> done) {
channel.callMethod(
getDescriptor().getMethods().get(0),
controller,
request,
- org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerResponseProto.getDefaultInstance(),
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkResponseProto.getDefaultInstance(),
com.google.protobuf.RpcUtil.generalizeCallback(
done,
- org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerResponseProto.class,
- org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerResponseProto.getDefaultInstance()));
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkResponseProto.class,
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkResponseProto.getDefaultInstance()));
}
}
@@ -1991,9 +7637,9 @@ public static BlockingInterface newBlockingStub(
}
public interface BlockingInterface {
- public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerResponseProto runContainer(
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkResponseProto submitWork(
com.google.protobuf.RpcController controller,
- org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerRequestProto request)
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto request)
throws com.google.protobuf.ServiceException;
}
@@ -2004,15 +7650,15 @@ private BlockingStub(com.google.protobuf.BlockingRpcChannel channel) {
private final com.google.protobuf.BlockingRpcChannel channel;
- public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerResponseProto runContainer(
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkResponseProto submitWork(
com.google.protobuf.RpcController controller,
- org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerRequestProto request)
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto request)
throws com.google.protobuf.ServiceException {
- return (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerResponseProto) channel.callBlockingMethod(
+ return (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkResponseProto) channel.callBlockingMethod(
getDescriptor().getMethods().get(0),
controller,
request,
- org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerResponseProto.getDefaultInstance());
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkResponseProto.getDefaultInstance());
}
}
@@ -2021,15 +7667,40 @@ private BlockingStub(com.google.protobuf.BlockingRpcChannel channel) {
}
private static com.google.protobuf.Descriptors.Descriptor
- internal_static_RunContainerRequestProto_descriptor;
+ internal_static_UserPayloadProto_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_UserPayloadProto_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_EntityDescriptorProto_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_EntityDescriptorProto_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_IOSpecProto_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_IOSpecProto_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_GroupInputSpecProto_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_GroupInputSpecProto_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_FragmentSpecProto_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
- internal_static_RunContainerRequestProto_fieldAccessorTable;
+ internal_static_FragmentSpecProto_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
- internal_static_RunContainerResponseProto_descriptor;
+ internal_static_SubmitWorkRequestProto_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
- internal_static_RunContainerResponseProto_fieldAccessorTable;
+ internal_static_SubmitWorkRequestProto_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_SubmitWorkResponseProto_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_SubmitWorkResponseProto_fieldAccessorTable;
public static com.google.protobuf.Descriptors.FileDescriptor
getDescriptor() {
@@ -2039,35 +7710,84 @@ private BlockingStub(com.google.protobuf.BlockingRpcChannel channel) {
descriptor;
static {
java.lang.String[] descriptorData = {
- "\n\030LlapDaemonProtocol.proto\"\330\001\n\030RunContai" +
- "nerRequestProto\022\033\n\023container_id_string\030\001" +
- " \001(\t\022\017\n\007am_host\030\002 \001(\t\022\017\n\007am_port\030\003 \001(\005\022\030" +
- "\n\020token_identifier\030\004 \001(\t\022\032\n\022credentials_" +
- "binary\030\005 \001(\014\022\014\n\004user\030\006 \001(\t\022\035\n\025applicatio" +
- "n_id_string\030\007 \001(\t\022\032\n\022app_attempt_number\030" +
- "\010 \001(\005\"\033\n\031RunContainerResponseProto2[\n\022Ll" +
- "apDaemonProtocol\022E\n\014runContainer\022\031.RunCo" +
- "ntainerRequestProto\032\032.RunContainerRespon" +
- "seProtoBH\n&org.apache.hadoop.hive.llap.d",
- "aemon.rpcB\030LlapDaemonProtocolProtos\210\001\001\240\001" +
- "\001"
+ "\n\030LlapDaemonProtocol.proto\"9\n\020UserPayloa" +
+ "dProto\022\024\n\014user_payload\030\001 \001(\014\022\017\n\007version\030" +
+ "\002 \001(\005\"j\n\025EntityDescriptorProto\022\022\n\nclass_" +
+ "name\030\001 \001(\t\022\'\n\014user_payload\030\002 \001(\0132\021.UserP" +
+ "ayloadProto\022\024\n\014history_text\030\003 \001(\014\"x\n\013IOS" +
+ "pecProto\022\035\n\025connected_vertex_name\030\001 \001(\t\022" +
+ "-\n\rio_descriptor\030\002 \001(\0132\026.EntityDescripto" +
+ "rProto\022\033\n\023physical_edge_count\030\003 \001(\005\"z\n\023G" +
+ "roupInputSpecProto\022\022\n\ngroup_name\030\001 \001(\t\022\026" +
+ "\n\016group_vertices\030\002 \003(\t\0227\n\027merged_input_d",
+ "escriptor\030\003 \001(\0132\026.EntityDescriptorProto\"" +
+ "\327\002\n\021FragmentSpecProto\022\036\n\026task_attempt_id" +
+ "_string\030\001 \001(\t\022\020\n\010dag_name\030\002 \001(\t\022\023\n\013verte" +
+ "x_name\030\003 \001(\t\0224\n\024processor_descriptor\030\004 \001" +
+ "(\0132\026.EntityDescriptorProto\022!\n\013input_spec" +
+ "s\030\005 \003(\0132\014.IOSpecProto\022\"\n\014output_specs\030\006 " +
+ "\003(\0132\014.IOSpecProto\0221\n\023grouped_input_specs" +
+ "\030\007 \003(\0132\024.GroupInputSpecProto\022\032\n\022vertex_p" +
+ "arallelism\030\010 \001(\005\022\027\n\017fragment_number\030\t \001(" +
+ "\005\022\026\n\016attempt_number\030\n \001(\005\"\201\002\n\026SubmitWork",
+ "RequestProto\022\033\n\023container_id_string\030\001 \001(" +
+ "\t\022\017\n\007am_host\030\002 \001(\t\022\017\n\007am_port\030\003 \001(\005\022\030\n\020t" +
+ "oken_identifier\030\004 \001(\t\022\032\n\022credentials_bin" +
+ "ary\030\005 \001(\014\022\014\n\004user\030\006 \001(\t\022\035\n\025application_i" +
+ "d_string\030\007 \001(\t\022\032\n\022app_attempt_number\030\010 \001" +
+ "(\005\022)\n\rfragment_spec\030\t \001(\0132\022.FragmentSpec" +
+ "Proto\"\031\n\027SubmitWorkResponseProto2U\n\022Llap" +
+ "DaemonProtocol\022?\n\nsubmitWork\022\027.SubmitWor" +
+ "kRequestProto\032\030.SubmitWorkResponseProtoB" +
+ "H\n&org.apache.hadoop.hive.llap.daemon.rp",
+ "cB\030LlapDaemonProtocolProtos\210\001\001\240\001\001"
};
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
public com.google.protobuf.ExtensionRegistry assignDescriptors(
com.google.protobuf.Descriptors.FileDescriptor root) {
descriptor = root;
- internal_static_RunContainerRequestProto_descriptor =
+ internal_static_UserPayloadProto_descriptor =
getDescriptor().getMessageTypes().get(0);
- internal_static_RunContainerRequestProto_fieldAccessorTable = new
+ internal_static_UserPayloadProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
- internal_static_RunContainerRequestProto_descriptor,
- new java.lang.String[] { "ContainerIdString", "AmHost", "AmPort", "TokenIdentifier", "CredentialsBinary", "User", "ApplicationIdString", "AppAttemptNumber", });
- internal_static_RunContainerResponseProto_descriptor =
+ internal_static_UserPayloadProto_descriptor,
+ new java.lang.String[] { "UserPayload", "Version", });
+ internal_static_EntityDescriptorProto_descriptor =
getDescriptor().getMessageTypes().get(1);
- internal_static_RunContainerResponseProto_fieldAccessorTable = new
+ internal_static_EntityDescriptorProto_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_EntityDescriptorProto_descriptor,
+ new java.lang.String[] { "ClassName", "UserPayload", "HistoryText", });
+ internal_static_IOSpecProto_descriptor =
+ getDescriptor().getMessageTypes().get(2);
+ internal_static_IOSpecProto_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_IOSpecProto_descriptor,
+ new java.lang.String[] { "ConnectedVertexName", "IoDescriptor", "PhysicalEdgeCount", });
+ internal_static_GroupInputSpecProto_descriptor =
+ getDescriptor().getMessageTypes().get(3);
+ internal_static_GroupInputSpecProto_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_GroupInputSpecProto_descriptor,
+ new java.lang.String[] { "GroupName", "GroupVertices", "MergedInputDescriptor", });
+ internal_static_FragmentSpecProto_descriptor =
+ getDescriptor().getMessageTypes().get(4);
+ internal_static_FragmentSpecProto_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_FragmentSpecProto_descriptor,
+ new java.lang.String[] { "TaskAttemptIdString", "DagName", "VertexName", "ProcessorDescriptor", "InputSpecs", "OutputSpecs", "GroupedInputSpecs", "VertexParallelism", "FragmentNumber", "AttemptNumber", });
+ internal_static_SubmitWorkRequestProto_descriptor =
+ getDescriptor().getMessageTypes().get(5);
+ internal_static_SubmitWorkRequestProto_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_SubmitWorkRequestProto_descriptor,
+ new java.lang.String[] { "ContainerIdString", "AmHost", "AmPort", "TokenIdentifier", "CredentialsBinary", "User", "ApplicationIdString", "AppAttemptNumber", "FragmentSpec", });
+ internal_static_SubmitWorkResponseProto_descriptor =
+ getDescriptor().getMessageTypes().get(6);
+ internal_static_SubmitWorkResponseProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
- internal_static_RunContainerResponseProto_descriptor,
+ internal_static_SubmitWorkResponseProto_descriptor,
new java.lang.String[] { });
return null;
}
diff --git llap-server/src/java/org/apache/hadoop/hive/llap/daemon/ContainerRunner.java llap-server/src/java/org/apache/hadoop/hive/llap/daemon/ContainerRunner.java
index 7dc5a51..0b1303d 100644
--- llap-server/src/java/org/apache/hadoop/hive/llap/daemon/ContainerRunner.java
+++ llap-server/src/java/org/apache/hadoop/hive/llap/daemon/ContainerRunner.java
@@ -16,9 +16,9 @@
import java.io.IOException;
-import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos;
+import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto;
public interface ContainerRunner {
- void queueContainer(LlapDaemonProtocolProtos.RunContainerRequestProto request) throws IOException;
+ void submitWork(SubmitWorkRequestProto request) throws IOException;
}
diff --git llap-server/src/java/org/apache/hadoop/hive/llap/daemon/HistoryLogger.java llap-server/src/java/org/apache/hadoop/hive/llap/daemon/HistoryLogger.java
index 3177501..b4fc618 100644
--- llap-server/src/java/org/apache/hadoop/hive/llap/daemon/HistoryLogger.java
+++ llap-server/src/java/org/apache/hadoop/hive/llap/daemon/HistoryLogger.java
@@ -29,6 +29,7 @@
private static final String HISTORY_VERTEX_NAME = "VertexName";
private static final String HISTORY_TASK_ID = "TaskId";
private static final String HISTORY_ATTEMPT_ID = "TaskAttemptId";
+ private static final String HISTORY_THREAD_NAME = "ThreadName";
private static final String HISTORY_HOSTNAME = "HostName";
private static final String HISTORY_SUCCEEDED = "Succeeded";
@@ -48,9 +49,9 @@ public static void logFragmentStart(String applicationIdStr, String containerIdS
public static void logFragmentEnd(String applicationIdStr, String containerIdStr, String hostname,
String dagName, String vertexName, int taskId, int attemptId,
- long startTime, boolean failed) {
+ String threadName, long startTime, boolean failed) {
HISTORY_LOGGER.info(constructFragmentEndString(applicationIdStr, containerIdStr, hostname,
- dagName, vertexName, taskId, attemptId, startTime, failed));
+ dagName, vertexName, taskId, attemptId, threadName, startTime, failed));
}
@@ -72,7 +73,7 @@ private static String constructFragmentStartString(String applicationIdStr, Stri
private static String constructFragmentEndString(String applicationIdStr, String containerIdStr,
String hostname, String dagName,
String vertexName, int taskId, int attemptId,
- long startTime, boolean succeeded) {
+ String threadName, long startTime, boolean succeeded) {
HistoryLineBuilder lb = new HistoryLineBuilder(EVENT_TYPE_FRAGMENT_END);
lb.addHostName(hostname);
lb.addAppid(applicationIdStr);
@@ -81,6 +82,7 @@ private static String constructFragmentEndString(String applicationIdStr, String
lb.addVertexName(vertexName);
lb.addTaskId(taskId);
lb.addTaskAttemptId(attemptId);
+ lb.addThreadName(threadName);
lb.addSuccessStatus(succeeded);
lb.addTime(HISTORY_START_TIME, startTime);
lb.addTime(HISTORY_END_TIME);
@@ -122,6 +124,10 @@ HistoryLineBuilder addTaskAttemptId(int attemptId) {
return setKeyValue(HISTORY_ATTEMPT_ID, String.valueOf(attemptId));
}
+ HistoryLineBuilder addThreadName(String threadName) {
+ return setKeyValue(HISTORY_THREAD_NAME, threadName);
+ }
+
HistoryLineBuilder addTime(String timeParam, long millis) {
return setKeyValue(timeParam, String.valueOf(millis));
}
diff --git llap-server/src/java/org/apache/hadoop/hive/llap/daemon/LlapDaemonConfiguration.java llap-server/src/java/org/apache/hadoop/hive/llap/daemon/LlapDaemonConfiguration.java
index b24c21d..2f405d9 100644
--- llap-server/src/java/org/apache/hadoop/hive/llap/daemon/LlapDaemonConfiguration.java
+++ llap-server/src/java/org/apache/hadoop/hive/llap/daemon/LlapDaemonConfiguration.java
@@ -57,8 +57,9 @@ public LlapDaemonConfiguration() {
// Section for configs used in the AM //
- public static final String LLAP_DAEMON_AM_SERVICE_HOSTS = LLAP_DAEMON_PREFIX + "service.hosts";
- public static final String LLAP_DAEMON_AM_COMMUNICATOR_NUM_THREADS = LLAP_DAEMON_PREFIX + "communicator.num.threads";
- public static final int LLAP_DAEMON_AM_COMMUNICATOR_NUM_THREADS_DEFAULT = 5;
+ public static final String LLAP_DAEMON_SERVICE_HOSTS = LLAP_DAEMON_PREFIX + "service.hosts";
+
+ public static final String LLAP_DAEMON_COMMUNICATOR_NUM_THREADS = LLAP_DAEMON_PREFIX + "communicator.num.threads";
+ public static final int LLAP_DAEMON_COMMUNICATOR_NUM_THREADS_DEFAULT = 5;
}
diff --git llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/ContainerRunnerImpl.java llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/ContainerRunnerImpl.java
index cfbe1b6..d485f87 100644
--- llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/ContainerRunnerImpl.java
+++ llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/ContainerRunnerImpl.java
@@ -18,37 +18,50 @@
import java.io.IOException;
import java.net.InetSocketAddress;
import java.nio.ByteBuffer;
+import java.security.PrivilegedExceptionAction;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
+import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;
+import com.google.common.collect.HashMultimap;
+import com.google.common.collect.Multimap;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.common.CallableWithNdc;
import org.apache.hadoop.hive.llap.daemon.ContainerRunner;
import org.apache.hadoop.hive.llap.daemon.HistoryLogger;
-import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerRequestProto;
+import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto;
import org.apache.hadoop.hive.llap.metrics.LlapDaemonExecutorMetrics;
import org.apache.hadoop.hive.llap.shufflehandler.ShuffleHandler;
+import org.apache.hadoop.hive.llap.tezplugins.Converters;
import org.apache.hadoop.io.DataInputBuffer;
+import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.Credentials;
+import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.service.AbstractService;
import org.apache.hadoop.yarn.api.ApplicationConstants;
import org.apache.hadoop.yarn.util.AuxiliaryServiceHelper;
import org.apache.log4j.Logger;
import org.apache.log4j.NDC;
+import org.apache.tez.common.TezCommonUtils;
+import org.apache.tez.common.TezTaskUmbilicalProtocol;
import org.apache.tez.common.security.JobTokenIdentifier;
import org.apache.tez.common.security.TokenCache;
+import org.apache.tez.dag.api.TezConfiguration;
import org.apache.tez.dag.api.TezConstants;
+import org.apache.tez.dag.api.TezException;
import org.apache.tez.runtime.api.ExecutionContext;
import org.apache.tez.runtime.api.impl.ExecutionContextImpl;
import org.apache.tez.runtime.common.objectregistry.ObjectRegistryImpl;
-import org.apache.tez.runtime.task.TezChild;
+import org.apache.tez.runtime.task.TaskReporter;
import org.apache.tez.runtime.task.TezChild.ContainerExecutionResult;
import com.google.common.base.Preconditions;
@@ -59,6 +72,7 @@
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
+import org.apache.tez.runtime.task.TezTaskRunner;
public class ContainerRunnerImpl extends AbstractService implements ContainerRunner {
@@ -73,6 +87,7 @@
private volatile FileSystem localFs;
private final long memoryPerExecutor;
private final LlapDaemonExecutorMetrics metrics;
+ private final ConfParams confParams;
// TODO Support for removing queued containers, interrupting / killing specific containers
public ContainerRunnerImpl(int numExecutors, String[] localDirsBase, int localShufflePort,
@@ -83,6 +98,13 @@ public ContainerRunnerImpl(int numExecutors, String[] localDirsBase, int localSh
"Invalid number of executors: " + numExecutors + ". Must be > 0");
this.localDirsBase = localDirsBase;
this.localAddress = localAddress;
+ this.confParams = new ConfParams();
+ // Setup to defaults to start with
+ confParams.amMaxEventsPerHeartbeat = TezConfiguration.TEZ_TASK_MAX_EVENTS_PER_HEARTBEAT_DEFAULT;
+ confParams.amHeartbeatIntervalMsMax =
+ TezConfiguration.TEZ_AM_RM_HEARTBEAT_INTERVAL_MS_MAX_DEFAULT;
+ confParams.amCounterHeartbeatInterval =
+ TezConfiguration.TEZ_TASK_AM_HEARTBEAT_COUNTER_INTERVAL_MS_DEFAULT;
ExecutorService raw = Executors.newFixedThreadPool(numExecutors,
new ThreadFactoryBuilder().setNameFormat(THREAD_NAME_FORMAT).build());
@@ -104,6 +126,16 @@ public ContainerRunnerImpl(int numExecutors, String[] localDirsBase, int localSh
public void serviceInit(Configuration conf) {
try {
localFs = FileSystem.getLocal(conf);
+ // TODO Fix visibility of these parameters - which
+ confParams.amCounterHeartbeatInterval = conf.getLong(
+ TezConfiguration.TEZ_TASK_AM_HEARTBEAT_COUNTER_INTERVAL_MS,
+ TezConfiguration.TEZ_TASK_AM_HEARTBEAT_COUNTER_INTERVAL_MS_DEFAULT);
+ confParams.amHeartbeatIntervalMsMax =
+ conf.getInt(TezConfiguration.TEZ_TASK_AM_HEARTBEAT_INTERVAL_MS,
+ TezConfiguration.TEZ_TASK_AM_HEARTBEAT_INTERVAL_MS_DEFAULT);
+ confParams.amMaxEventsPerHeartbeat =
+ conf.getInt(TezConfiguration.TEZ_TASK_MAX_EVENTS_PER_HEARTBEAT,
+ TezConfiguration.TEZ_TASK_MAX_EVENTS_PER_HEARTBEAT_DEFAULT);
} catch (IOException e) {
throw new RuntimeException("Failed to setup local filesystem instance", e);
}
@@ -129,12 +161,21 @@ private static String createAppSpecificLocalDir(String baseDir, String applicati
}
@Override
- public void queueContainer(RunContainerRequestProto request) throws IOException {
+ public void submitWork(SubmitWorkRequestProto request) throws IOException {
HistoryLogger.logFragmentStart(request.getApplicationIdString(), request.getContainerIdString(),
- localAddress.get().getHostName(), null, null, -1, -1);
+ localAddress.get().getHostName(), request.getFragmentSpec().getDagName(),
+ request.getFragmentSpec().getVertexName(), request.getFragmentSpec().getFragmentNumber(),
+ request.getFragmentSpec().getAttemptNumber());
LOG.info("Queuing container for execution: " + request);
// This is the start of container-annotated logging.
- NDC.push(request.getContainerIdString());
+ // TODO Reduce the length of this string. Way too verbose at the moment.
+ String ndcContextString =
+ request.getContainerIdString() + "_" +
+ request.getFragmentSpec().getDagName() + "_" +
+ request.getFragmentSpec().getVertexName() +
+ "_" + request.getFragmentSpec().getFragmentNumber() + "_" +
+ request.getFragmentSpec().getAttemptNumber();
+ NDC.push(ndcContextString);
try {
Map<String, String> env = new HashMap<String, String>();
// TODO What else is required in this environment map.
@@ -149,13 +190,14 @@ public void queueContainer(RunContainerRequestProto request) throws IOException
request.getUser());
localFs.mkdirs(new Path(localDirs[i]));
}
- LOG.info("DEBUG: Dirs are: " + Arrays.toString(localDirs));
+ // TODO Avoid this directory creation on each work-unit submission.
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Dirs are: " + Arrays.toString(localDirs));
+ }
// Setup workingDir. This is otherwise setup as Environment.PWD
// Used for re-localization, to add the user specified configuration (conf_pb_binary_stream)
- // TODO Set this up to read user configuration if required. Ideally, Inputs / Outputs should be self configured.
- // Setting this up correctly is more from framework components to setup security, ping intervals, etc.
String workingDir = localDirs[0];
Credentials credentials = new Credentials();
@@ -170,12 +212,11 @@ public void queueContainer(RunContainerRequestProto request) throws IOException
LOG.info("DEBUG: Registering request with the ShuffleHandler");
ShuffleHandler.get().registerApplication(request.getApplicationIdString(), jobToken, request.getUser());
- ContainerRunnerCallable callable = new ContainerRunnerCallable(request, new Configuration(getConfig()),
+ TaskRunnerCallable callable = new TaskRunnerCallable(request, new Configuration(getConfig()),
new ExecutionContextImpl(localAddress.get().getHostName()), env, localDirs,
- workingDir, credentials, memoryPerExecutor, localAddress.get().getHostName());
- ListenableFuture<ContainerExecutionResult> future = executorService
- .submit(callable);
- Futures.addCallback(future, new ContainerRunnerCallback(request, callable));
+ workingDir, credentials, memoryPerExecutor, confParams);
+ ListenableFuture<ContainerExecutionResult> future = executorService.submit(callable);
+ Futures.addCallback(future, new TaskRunnerCallback(request, callable));
metrics.incrExecutorTotalRequestsHandled();
metrics.incrExecutorNumQueuedRequests();
} finally {
@@ -183,28 +224,31 @@ public void queueContainer(RunContainerRequestProto request) throws IOException
}
}
- static class ContainerRunnerCallable extends CallableWithNdc<ContainerExecutionResult> {
+ static class TaskRunnerCallable extends CallableWithNdc<ContainerExecutionResult> {
- private final RunContainerRequestProto request;
+ private final SubmitWorkRequestProto request;
private final Configuration conf;
private final String workingDir;
private final String[] localDirs;
private final Map<String, String> envMap;
- // TODO Is a null pid valid - will this work with multiple different ResourceMonitors ?
private final String pid = null;
private final ObjectRegistryImpl objectRegistry;
private final ExecutionContext executionContext;
private final Credentials credentials;
private final long memoryAvailable;
- private volatile TezChild tezChild;
- private final String localHostname;
+ private final ListeningExecutorService executor;
+ private final ConfParams confParams;
+ private volatile TezTaskRunner taskRunner;
+ private volatile TaskReporter taskReporter;
+ private TezTaskUmbilicalProtocol umbilical;
private volatile long startTime;
+ private volatile String threadName;
- ContainerRunnerCallable(RunContainerRequestProto request, Configuration conf,
+ TaskRunnerCallable(SubmitWorkRequestProto request, Configuration conf,
ExecutionContext executionContext, Map<String, String> envMap,
String[] localDirs, String workingDir, Credentials credentials,
- long memoryAvailable, String localHostName) {
+ long memoryAvailable, ConfParams confParams) {
this.request = request;
this.conf = conf;
this.executionContext = executionContext;
@@ -214,40 +258,105 @@ public void queueContainer(RunContainerRequestProto request) throws IOException
this.objectRegistry = new ObjectRegistryImpl();
this.credentials = credentials;
this.memoryAvailable = memoryAvailable;
- this.localHostname = localHostName;
-
+ this.confParams = confParams;
+ // TODO This executor seems unnecessary. Here and TezChild
+ ExecutorService executorReal = Executors.newFixedThreadPool(1,
+ new ThreadFactoryBuilder()
+ .setDaemon(true)
+ .setNameFormat(
+ "TezTaskRunner_" + request.getFragmentSpec().getTaskAttemptIdString())
+ .build());
+ executor = MoreExecutors.listeningDecorator(executorReal);
}
@Override
protected ContainerExecutionResult callInternal() throws Exception {
this.startTime = System.currentTimeMillis();
+ this.threadName = Thread.currentThread().getName();
+ // TODO Consolidate this code with TezChild.
Stopwatch sw = new Stopwatch().start();
- tezChild =
- new TezChild(conf, request.getAmHost(), request.getAmPort(),
- request.getContainerIdString(),
- request.getTokenIdentifier(), request.getAppAttemptNumber(), workingDir, localDirs,
- envMap, objectRegistry, pid,
- executionContext, credentials, memoryAvailable, request.getUser(), null);
- ContainerExecutionResult result = tezChild.run();
+ UserGroupInformation taskUgi = UserGroupInformation.createRemoteUser(request.getUser());
+ taskUgi.addCredentials(credentials);
+
+ Token<JobTokenIdentifier> jobToken = TokenCache.getSessionToken(credentials);
+ Map<String, ByteBuffer> serviceConsumerMetadata = new HashMap<String, ByteBuffer>();
+ serviceConsumerMetadata.put(TezConstants.TEZ_SHUFFLE_HANDLER_SERVICE_ID,
+ TezCommonUtils.convertJobTokenToBytes(jobToken));
+ Multimap<String, String> startedInputsMap = HashMultimap.create();
+
+ UserGroupInformation taskOwner =
+ UserGroupInformation.createRemoteUser(request.getTokenIdentifier());
+ final InetSocketAddress address =
+ NetUtils.createSocketAddrForHost(request.getAmHost(), request.getAmPort());
+ SecurityUtil.setTokenService(jobToken, address);
+ taskOwner.addToken(jobToken);
+ umbilical = taskOwner.doAs(new PrivilegedExceptionAction<TezTaskUmbilicalProtocol>() {
+ @Override
+ public TezTaskUmbilicalProtocol run() throws Exception {
+ return RPC.getProxy(TezTaskUmbilicalProtocol.class,
+ TezTaskUmbilicalProtocol.versionID, address, conf);
+ }
+ });
+
+ taskReporter = new TaskReporter(
+ umbilical,
+ confParams.amHeartbeatIntervalMsMax,
+ confParams.amCounterHeartbeatInterval,
+ confParams.amMaxEventsPerHeartbeat,
+ new AtomicLong(0),
+ request.getContainerIdString());
+
+ taskRunner = new TezTaskRunner(conf, taskUgi, localDirs,
+ Converters.getTaskSpecfromProto(request.getFragmentSpec()), umbilical,
+ request.getAppAttemptNumber(),
+ serviceConsumerMetadata, envMap, startedInputsMap, taskReporter, executor, objectRegistry,
+ pid,
+ executionContext, memoryAvailable);
+
+ boolean shouldDie;
+ try {
+ shouldDie = !taskRunner.run();
+ if (shouldDie) {
+ LOG.info("Got a shouldDie notification via heartbeats. Shutting down");
+ return new ContainerExecutionResult(ContainerExecutionResult.ExitStatus.SUCCESS, null,
+ "Asked to die by the AM");
+ }
+ } catch (IOException e) {
+ return new ContainerExecutionResult(ContainerExecutionResult.ExitStatus.EXECUTION_FAILURE,
+ e, "TaskExecutionFailure: " + e.getMessage());
+ } catch (TezException e) {
+ return new ContainerExecutionResult(ContainerExecutionResult.ExitStatus.EXECUTION_FAILURE,
+ e, "TaskExecutionFailure: " + e.getMessage());
+ } finally {
+ // TODO Fix UGI and FS Handling. Closing UGI here causes some errors right now.
+// FileSystem.closeAllForUGI(taskUgi);
+ }
LOG.info("ExecutionTime for Container: " + request.getContainerIdString() + "=" +
sw.stop().elapsedMillis());
- return result;
+ return new ContainerExecutionResult(ContainerExecutionResult.ExitStatus.SUCCESS, null,
+ null);
}
- public TezChild getTezChild() {
- return this.tezChild;
+ public void shutdown() {
+ executor.shutdownNow();
+ if (taskReporter != null) {
+ taskReporter.shutdown();
+ }
+ if (umbilical != null) {
+ RPC.stopProxy(umbilical);
+ }
}
}
- final class ContainerRunnerCallback implements FutureCallback<ContainerExecutionResult> {
+ final class TaskRunnerCallback implements FutureCallback<ContainerExecutionResult> {
- private final RunContainerRequestProto request;
- private final ContainerRunnerCallable containerRunnerCallable;
+ private final SubmitWorkRequestProto request;
+ private final TaskRunnerCallable taskRunnerCallable;
- ContainerRunnerCallback(RunContainerRequestProto request,
- ContainerRunnerCallable containerRunnerCallable) {
+ TaskRunnerCallback(SubmitWorkRequestProto request,
+ TaskRunnerCallable taskRunnerCallable) {
this.request = request;
- this.containerRunnerCallable = containerRunnerCallable;
+ this.taskRunnerCallable = taskRunnerCallable;
}
// TODO Slightly more useful error handling
@@ -255,51 +364,62 @@ public TezChild getTezChild() {
public void onSuccess(ContainerExecutionResult result) {
switch (result.getExitStatus()) {
case SUCCESS:
- LOG.info("Successfully finished: " + request.getApplicationIdString() + ", containerId=" +
- request.getContainerIdString());
+ LOG.info("Successfully finished: " + getTaskIdentifierString(request));
metrics.incrExecutorTotalSuccess();
break;
case EXECUTION_FAILURE:
- LOG.info("Failed to run: " + request.getApplicationIdString() + ", containerId=" +
- request.getContainerIdString(), result.getThrowable());
+ LOG.info("Failed to run: " + getTaskIdentifierString(request));
metrics.incrExecutorTotalExecutionFailed();
break;
case INTERRUPTED:
- LOG.info(
- "Interrupted while running: " + request.getApplicationIdString() + ", containerId=" +
- request.getContainerIdString(), result.getThrowable());
+ LOG.info("Interrupted while running: " + getTaskIdentifierString(request));
metrics.incrExecutorTotalInterrupted();
break;
case ASKED_TO_DIE:
- LOG.info(
- "Asked to die while running: " + request.getApplicationIdString() + ", containerId=" +
- request.getContainerIdString());
+ LOG.info("Asked to die while running: " + getTaskIdentifierString(request));
metrics.incrExecutorTotalAskedToDie();
break;
}
+ taskRunnerCallable.shutdown();
HistoryLogger
- .logFragmentEnd(request.getApplicationIdString(),
- request.getContainerIdString(),
- localAddress.get().getHostName(), null, null, -1, -1,
- containerRunnerCallable.startTime, true);
+ .logFragmentEnd(request.getApplicationIdString(), request.getContainerIdString(),
+ localAddress.get().getHostName(), request.getFragmentSpec().getDagName(),
+ request.getFragmentSpec().getVertexName(),
+ request.getFragmentSpec().getFragmentNumber(),
+ request.getFragmentSpec().getAttemptNumber(), taskRunnerCallable.threadName,
+ taskRunnerCallable.startTime, true);
metrics.decrExecutorNumQueuedRequests();
}
@Override
public void onFailure(Throwable t) {
- LOG.error(
- "TezChild execution failed for : " + request.getApplicationIdString() + ", containerId=" +
- request.getContainerIdString());
- TezChild tezChild = containerRunnerCallable.getTezChild();
- if (tezChild != null) {
- tezChild.shutdown();
- }
+ LOG.error("TezTaskRunner execution failed for : " + getTaskIdentifierString(request), t);
+ taskRunnerCallable.shutdown();
HistoryLogger
- .logFragmentEnd(request.getApplicationIdString(),
- request.getContainerIdString(),
- localAddress.get().getHostName(), null, null, -1, -1,
- containerRunnerCallable.startTime, false);
+ .logFragmentEnd(request.getApplicationIdString(), request.getContainerIdString(),
+ localAddress.get().getHostName(), request.getFragmentSpec().getDagName(),
+ request.getFragmentSpec().getVertexName(),
+ request.getFragmentSpec().getFragmentNumber(),
+ request.getFragmentSpec().getAttemptNumber(), taskRunnerCallable.threadName,
+ taskRunnerCallable.startTime, false);
metrics.decrExecutorNumQueuedRequests();
}
+
+ private String getTaskIdentifierString(SubmitWorkRequestProto request) {
+ StringBuilder sb = new StringBuilder();
+ sb.append("AppId=").append(request.getApplicationIdString())
+ .append(", containerId=").append(request.getContainerIdString())
+ .append(", Dag=").append(request.getFragmentSpec().getDagName())
+ .append(", Vertex=").append(request.getFragmentSpec().getVertexName())
+ .append(", FragmentNum=").append(request.getFragmentSpec().getFragmentNumber())
+ .append(", Attempt=").append(request.getFragmentSpec().getAttemptNumber());
+ return sb.toString();
+ }
+ }
+
+ private static class ConfParams {
+ int amHeartbeatIntervalMsMax;
+ long amCounterHeartbeatInterval;
+ int amMaxEventsPerHeartbeat;
}
}
diff --git llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemon.java llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemon.java
index f67cf84..e75aa8d 100644
--- llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemon.java
+++ llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemon.java
@@ -24,7 +24,7 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.llap.daemon.ContainerRunner;
import org.apache.hadoop.hive.llap.daemon.LlapDaemonConfiguration;
-import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerRequestProto;
+import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos;
import org.apache.hadoop.hive.llap.io.api.LlapIoProxy;
import org.apache.hadoop.hive.llap.metrics.LlapDaemonExecutorMetrics;
import org.apache.hadoop.hive.llap.metrics.LlapMetricsSystem;
@@ -190,8 +190,9 @@ public static void main(String[] args) throws Exception {
}
@Override
- public void queueContainer(RunContainerRequestProto request) throws IOException {
- containerRunner.queueContainer(request);
+ public void submitWork(LlapDaemonProtocolProtos.SubmitWorkRequestProto request) throws
+ IOException {
+ containerRunner.submitWork(request);
}
// LlapDaemonMXBean methods. Will be exposed via JMX
@@ -224,4 +225,6 @@ public long getMemoryPerInstance() {
public long getMaxJvmMemory() {
return maxJvmMemory;
}
+
+
}
diff --git llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemonProtocolClientImpl.java llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemonProtocolClientImpl.java
index 3e6fc0a..ae74ae8 100644
--- llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemonProtocolClientImpl.java
+++ llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemonProtocolClientImpl.java
@@ -20,12 +20,11 @@
import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.hive.llap.daemon.LlapDaemonProtocolBlockingPB;
-import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerRequestProto;
-import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerResponseProto;
// TODO Change all this to be based on a regular interface instead of relying on the Proto service - Exception signatures cannot be controlled without this for the moment.
@@ -43,17 +42,16 @@ public LlapDaemonProtocolClientImpl(Configuration conf, String hostname, int por
}
@Override
- public RunContainerResponseProto runContainer(RpcController controller,
- RunContainerRequestProto request) throws
+ public LlapDaemonProtocolProtos.SubmitWorkResponseProto submitWork(RpcController controller,
+ LlapDaemonProtocolProtos.SubmitWorkRequestProto request) throws
ServiceException {
try {
- return getProxy().runContainer(null, request);
+ return getProxy().submitWork(null, request);
} catch (IOException e) {
throw new ServiceException(e);
}
}
-
public LlapDaemonProtocolBlockingPB getProxy() throws IOException {
if (proxy == null) {
proxy = createProxy();
@@ -65,8 +63,7 @@ public LlapDaemonProtocolBlockingPB createProxy() throws IOException {
LlapDaemonProtocolBlockingPB p;
// TODO Fix security
RPC.setProtocolEngine(conf, LlapDaemonProtocolBlockingPB.class, ProtobufRpcEngine.class);
- p = (LlapDaemonProtocolBlockingPB) RPC
- .getProxy(LlapDaemonProtocolBlockingPB.class, 0, serverAddr, conf);
+ p = RPC.getProxy(LlapDaemonProtocolBlockingPB.class, 0, serverAddr, conf);
return p;
}
}
diff --git llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemonProtocolServerImpl.java llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemonProtocolServerImpl.java
index 4703904..5d36bbc 100644
--- llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemonProtocolServerImpl.java
+++ llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemonProtocolServerImpl.java
@@ -27,6 +27,7 @@
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos;
+import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkResponseProto;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.net.NetUtils;
@@ -34,8 +35,6 @@
import org.apache.hadoop.hive.llap.daemon.ContainerRunner;
import org.apache.hadoop.hive.llap.daemon.LlapDaemonConfiguration;
import org.apache.hadoop.hive.llap.daemon.LlapDaemonProtocolBlockingPB;
-import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerRequestProto;
-import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerResponseProto;
public class LlapDaemonProtocolServerImpl extends AbstractService
implements LlapDaemonProtocolBlockingPB {
@@ -58,19 +57,18 @@ public LlapDaemonProtocolServerImpl(LlapDaemonConfiguration daemonConf,
}
@Override
- public RunContainerResponseProto runContainer(RpcController controller,
- RunContainerRequestProto request) throws
+ public SubmitWorkResponseProto submitWork(RpcController controller,
+ LlapDaemonProtocolProtos.SubmitWorkRequestProto request) throws
ServiceException {
- LOG.info("Received request: " + request);
+ LOG.info("DEBUG: Recevied request: " + request);
try {
- containerRunner.queueContainer(request);
+ containerRunner.submitWork(request);
} catch (IOException e) {
throw new ServiceException(e);
}
- return RunContainerResponseProto.getDefaultInstance();
+ return SubmitWorkResponseProto.getDefaultInstance();
}
-
@Override
public void serviceStart() {
Configuration conf = getConfig();
diff --git llap-server/src/java/org/apache/hadoop/hive/llap/tezplugins/Converters.java llap-server/src/java/org/apache/hadoop/hive/llap/tezplugins/Converters.java
new file mode 100644
index 0000000..7e06f2b
--- /dev/null
+++ llap-server/src/java/org/apache/hadoop/hive/llap/tezplugins/Converters.java
@@ -0,0 +1,253 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.llap.tezplugins;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import com.google.protobuf.ByteString;
+import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto;
+import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto;
+import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto;
+import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentSpecProto;
+import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProto;
+import org.apache.tez.common.TezCommonUtils;
+import org.apache.tez.dag.api.EntityDescriptor;
+import org.apache.tez.dag.api.InputDescriptor;
+import org.apache.tez.dag.api.OutputDescriptor;
+import org.apache.tez.dag.api.ProcessorDescriptor;
+import org.apache.tez.dag.api.TezUncheckedException;
+import org.apache.tez.dag.api.UserPayload;
+import org.apache.tez.dag.records.TezTaskAttemptID;
+import org.apache.tez.runtime.api.impl.GroupInputSpec;
+import org.apache.tez.runtime.api.impl.InputSpec;
+import org.apache.tez.runtime.api.impl.OutputSpec;
+import org.apache.tez.runtime.api.impl.TaskSpec;
+
+public class Converters {
+
+ public static TaskSpec getTaskSpecfromProto(FragmentSpecProto FragmentSpecProto) {
+ TezTaskAttemptID taskAttemptID =
+ TezTaskAttemptID.fromString(FragmentSpecProto.getTaskAttemptIdString());
+
+ ProcessorDescriptor processorDescriptor = null;
+ if (FragmentSpecProto.hasProcessorDescriptor()) {
+ processorDescriptor = convertProcessorDescriptorFromProto(
+ FragmentSpecProto.getProcessorDescriptor());
+ }
+
+ List<InputSpec> inputSpecList = new ArrayList<InputSpec>(FragmentSpecProto.getInputSpecsCount());
+ if (FragmentSpecProto.getInputSpecsCount() > 0) {
+ for (IOSpecProto inputSpecProto : FragmentSpecProto.getInputSpecsList()) {
+ inputSpecList.add(getInputSpecFromProto(inputSpecProto));
+ }
+ }
+
+ List<OutputSpec> outputSpecList =
+ new ArrayList<OutputSpec>(FragmentSpecProto.getOutputSpecsCount());
+ if (FragmentSpecProto.getOutputSpecsCount() > 0) {
+ for (IOSpecProto outputSpecProto : FragmentSpecProto.getOutputSpecsList()) {
+ outputSpecList.add(getOutputSpecFromProto(outputSpecProto));
+ }
+ }
+
+ List<GroupInputSpec> groupInputSpecs =
+ new ArrayList<GroupInputSpec>(FragmentSpecProto.getGroupedInputSpecsCount());
+ if (FragmentSpecProto.getGroupedInputSpecsCount() > 0) {
+ for (GroupInputSpecProto groupInputSpecProto : FragmentSpecProto.getGroupedInputSpecsList()) {
+ groupInputSpecs.add(getGroupInputSpecFromProto(groupInputSpecProto));
+ }
+ }
+
+ TaskSpec taskSpec =
+ new TaskSpec(taskAttemptID, FragmentSpecProto.getDagName(), FragmentSpecProto.getVertexName(),
+ FragmentSpecProto.getVertexParallelism(), processorDescriptor, inputSpecList,
+ outputSpecList, groupInputSpecs);
+ return taskSpec;
+ }
+
+ public static FragmentSpecProto convertTaskSpecToProto(TaskSpec taskSpec) {
+ FragmentSpecProto.Builder builder = FragmentSpecProto.newBuilder();
+ builder.setTaskAttemptIdString(taskSpec.getTaskAttemptID().toString());
+ builder.setDagName(taskSpec.getDAGName());
+ builder.setVertexName(taskSpec.getVertexName());
+ builder.setVertexParallelism(taskSpec.getVertexParallelism());
+ builder.setFragmentNumber(taskSpec.getTaskAttemptID().getTaskID().getId());
+ builder.setAttemptNumber(taskSpec.getTaskAttemptID().getId());
+
+ if (taskSpec.getProcessorDescriptor() != null) {
+ builder.setProcessorDescriptor(
+ convertToProto(taskSpec.getProcessorDescriptor()));
+ }
+
+ if (taskSpec.getInputs() != null && !taskSpec.getInputs().isEmpty()) {
+ for (InputSpec inputSpec : taskSpec.getInputs()) {
+ builder.addInputSpecs(convertInputSpecToProto(inputSpec));
+ }
+ }
+
+ if (taskSpec.getOutputs() != null && !taskSpec.getOutputs().isEmpty()) {
+ for (OutputSpec outputSpec : taskSpec.getOutputs()) {
+ builder.addOutputSpecs(convertOutputSpecToProto(outputSpec));
+ }
+ }
+
+ if (taskSpec.getGroupInputs() != null && !taskSpec.getGroupInputs().isEmpty()) {
+ for (GroupInputSpec groupInputSpec : taskSpec.getGroupInputs()) {
+ builder.addGroupedInputSpecs(convertGroupInputSpecToProto(groupInputSpec));
+
+ }
+ }
+ return builder.build();
+ }
+
+ private static ProcessorDescriptor convertProcessorDescriptorFromProto(
+ EntityDescriptorProto proto) {
+ String className = proto.getClassName();
+ UserPayload payload = convertPayloadFromProto(proto);
+ ProcessorDescriptor pd = ProcessorDescriptor.create(className);
+ setUserPayload(pd, payload);
+ return pd;
+ }
+
+ private static EntityDescriptorProto convertToProto(
+ EntityDescriptor<?> descriptor) {
+ EntityDescriptorProto.Builder builder = EntityDescriptorProto
+ .newBuilder();
+ builder.setClassName(descriptor.getClassName());
+
+ UserPayload userPayload = descriptor.getUserPayload();
+ if (userPayload != null) {
+ UserPayloadProto.Builder payloadBuilder = UserPayloadProto.newBuilder();
+ if (userPayload.hasPayload()) {
+ payloadBuilder.setUserPayload(ByteString.copyFrom(userPayload.getPayload()));
+ payloadBuilder.setVersion(userPayload.getVersion());
+ }
+ builder.setUserPayload(payloadBuilder.build());
+ }
+ if (descriptor.getHistoryText() != null) {
+ try {
+ builder.setHistoryText(TezCommonUtils.compressByteArrayToByteString(
+ descriptor.getHistoryText().getBytes("UTF-8")));
+ } catch (IOException e) {
+ throw new TezUncheckedException(e);
+ }
+ }
+ return builder.build();
+ }
+
+ private static InputSpec getInputSpecFromProto(IOSpecProto inputSpecProto) {
+ InputDescriptor inputDescriptor = null;
+ if (inputSpecProto.hasIoDescriptor()) {
+ inputDescriptor =
+ convertInputDescriptorFromProto(inputSpecProto.getIoDescriptor());
+ }
+ InputSpec inputSpec = new InputSpec(inputSpecProto.getConnectedVertexName(), inputDescriptor,
+ inputSpecProto.getPhysicalEdgeCount());
+ return inputSpec;
+ }
+
+ private static InputDescriptor convertInputDescriptorFromProto(
+ EntityDescriptorProto proto) {
+ String className = proto.getClassName();
+ UserPayload payload = convertPayloadFromProto(proto);
+ InputDescriptor id = InputDescriptor.create(className);
+ setUserPayload(id, payload);
+ return id;
+ }
+
+ private static OutputDescriptor convertOutputDescriptorFromProto(
+ EntityDescriptorProto proto) {
+ String className = proto.getClassName();
+ UserPayload payload = convertPayloadFromProto(proto);
+ OutputDescriptor od = OutputDescriptor.create(className);
+ setUserPayload(od, payload);
+ return od;
+ }
+
+ private static IOSpecProto convertInputSpecToProto(InputSpec inputSpec) {
+ IOSpecProto.Builder builder = IOSpecProto.newBuilder();
+ if (inputSpec.getSourceVertexName() != null) {
+ builder.setConnectedVertexName(inputSpec.getSourceVertexName());
+ }
+ if (inputSpec.getInputDescriptor() != null) {
+ builder.setIoDescriptor(convertToProto(inputSpec.getInputDescriptor()));
+ }
+ builder.setPhysicalEdgeCount(inputSpec.getPhysicalEdgeCount());
+ return builder.build();
+ }
+
+ private static OutputSpec getOutputSpecFromProto(IOSpecProto outputSpecProto) {
+ OutputDescriptor outputDescriptor = null;
+ if (outputSpecProto.hasIoDescriptor()) {
+ outputDescriptor =
+ convertOutputDescriptorFromProto(outputSpecProto.getIoDescriptor());
+ }
+ OutputSpec outputSpec =
+ new OutputSpec(outputSpecProto.getConnectedVertexName(), outputDescriptor,
+ outputSpecProto.getPhysicalEdgeCount());
+ return outputSpec;
+ }
+
+ public static IOSpecProto convertOutputSpecToProto(OutputSpec outputSpec) {
+ IOSpecProto.Builder builder = IOSpecProto.newBuilder();
+ if (outputSpec.getDestinationVertexName() != null) {
+ builder.setConnectedVertexName(outputSpec.getDestinationVertexName());
+ }
+ if (outputSpec.getOutputDescriptor() != null) {
+ builder.setIoDescriptor(convertToProto(outputSpec.getOutputDescriptor()));
+ }
+ builder.setPhysicalEdgeCount(outputSpec.getPhysicalEdgeCount());
+ return builder.build();
+ }
+
+ private static GroupInputSpec getGroupInputSpecFromProto(GroupInputSpecProto groupInputSpecProto) {
+ GroupInputSpec groupSpec = new GroupInputSpec(groupInputSpecProto.getGroupName(),
+ groupInputSpecProto.getGroupVerticesList(),
+ convertInputDescriptorFromProto(groupInputSpecProto.getMergedInputDescriptor()));
+ return groupSpec;
+ }
+
+ private static GroupInputSpecProto convertGroupInputSpecToProto(GroupInputSpec groupInputSpec) {
+ GroupInputSpecProto.Builder builder = GroupInputSpecProto.newBuilder();
+ builder.setGroupName(groupInputSpec.getGroupName());
+ builder.addAllGroupVertices(groupInputSpec.getGroupVertices());
+ builder.setMergedInputDescriptor(convertToProto(groupInputSpec.getMergedInputDescriptor()));
+ return builder.build();
+ }
+
+
+ private static void setUserPayload(EntityDescriptor<?> entity, UserPayload payload) {
+ if (payload != null) {
+ entity.setUserPayload(payload);
+ }
+ }
+
+ private static UserPayload convertPayloadFromProto(
+ EntityDescriptorProto proto) {
+ UserPayload userPayload = null;
+ if (proto.hasUserPayload()) {
+ if (proto.getUserPayload().hasUserPayload()) {
+ userPayload =
+ UserPayload.create(proto.getUserPayload().getUserPayload().asReadOnlyByteBuffer(), proto.getUserPayload().getVersion());
+ } else {
+ userPayload = UserPayload.create(null);
+ }
+ }
+ return userPayload;
+ }
+
+}
diff --git llap-server/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapContainerLauncher.java llap-server/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapContainerLauncher.java
new file mode 100644
index 0000000..07f96f5
--- /dev/null
+++ llap-server/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapContainerLauncher.java
@@ -0,0 +1,65 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.llap.tezplugins;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.service.AbstractService;
+import org.apache.hadoop.yarn.util.Clock;
+import org.apache.tez.dag.app.AppContext;
+import org.apache.tez.dag.app.TaskAttemptListener;
+import org.apache.tez.dag.app.launcher.ContainerLauncher;
+import org.apache.tez.dag.app.rm.NMCommunicatorEvent;
+import org.apache.tez.dag.app.rm.NMCommunicatorLaunchRequestEvent;
+import org.apache.tez.dag.app.rm.container.AMContainerEvent;
+import org.apache.tez.dag.app.rm.container.AMContainerEventLaunched;
+import org.apache.tez.dag.app.rm.container.AMContainerEventType;
+import org.apache.tez.dag.history.DAGHistoryEvent;
+import org.apache.tez.dag.history.events.ContainerLaunchedEvent;
+
+public class LlapContainerLauncher extends AbstractService implements ContainerLauncher {
+ static final Log LOG = LogFactory.getLog(LlapContainerLauncher.class);
+
+ private final AppContext context;
+ private final Clock clock;
+
+ public LlapContainerLauncher(AppContext appContext, Configuration conf,
+ TaskAttemptListener tal) {
+ super(LlapContainerLauncher.class.getName());
+ this.context = appContext;
+ this.clock = appContext.getClock();
+ }
+
+ @Override
+ public void handle(NMCommunicatorEvent event) {
+ switch(event.getType()) {
+ case CONTAINER_LAUNCH_REQUEST:
+ final NMCommunicatorLaunchRequestEvent launchEvent = (NMCommunicatorLaunchRequestEvent) event;
+ LOG.info("No-op launch for container: " + launchEvent.getContainerId() + " succeeded on host: " + launchEvent.getNodeId());
+ context.getEventHandler().handle(new AMContainerEventLaunched(launchEvent.getContainerId()));
+ ContainerLaunchedEvent lEvt = new ContainerLaunchedEvent(
+ launchEvent.getContainerId(), clock.getTime(), context.getApplicationAttemptId());
+ context.getHistoryHandler().handle(new DAGHistoryEvent(
+ null, lEvt));
+ break;
+ case CONTAINER_STOP_REQUEST:
+ LOG.info("DEBUG: Ignoring STOP_REQUEST for event: " + event);
+ context.getEventHandler().handle(new AMContainerEvent(event.getContainerId(),
+ AMContainerEventType.C_NM_STOP_SENT));
+ break;
+ }
+ }
+}
diff --git llap-server/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapTaskCommunicator.java llap-server/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapTaskCommunicator.java
new file mode 100644
index 0000000..2304aeb
--- /dev/null
+++ llap-server/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapTaskCommunicator.java
@@ -0,0 +1,185 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.llap.tezplugins;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+
+import com.google.protobuf.ByteString;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.llap.daemon.LlapDaemonConfiguration;
+import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto;
+import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkResponseProto;
+import org.apache.hadoop.io.DataOutputBuffer;
+import org.apache.hadoop.security.Credentials;
+import org.apache.hadoop.yarn.api.ApplicationConstants;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.LocalResource;
+import org.apache.tez.dag.api.TaskCommunicatorContext;
+import org.apache.tez.dag.app.TezTaskCommunicatorImpl;
+import org.apache.tez.dag.records.TezTaskAttemptID;
+import org.apache.tez.runtime.api.impl.TaskSpec;
+
+public class LlapTaskCommunicator extends TezTaskCommunicatorImpl {
+
+ private static final Log LOG = LogFactory.getLog(LlapTaskCommunicator.class);
+
+ private final SubmitWorkRequestProto BASE_SUBMIT_WORK_REQUEST;
+ private final ConcurrentMap<String, ByteBuffer> credentialMap;
+
+ private TaskCommunicator communicator;
+
+ public LlapTaskCommunicator(
+ TaskCommunicatorContext taskCommunicatorContext) {
+ super(taskCommunicatorContext);
+
+ SubmitWorkRequestProto.Builder baseBuilder = SubmitWorkRequestProto.newBuilder();
+
+ // TODO Avoid reading this from the environment
+ baseBuilder.setUser(System.getenv(ApplicationConstants.Environment.USER.name()));
+ baseBuilder.setApplicationIdString(
+ taskCommunicatorContext.getApplicationAttemptId().getApplicationId().toString());
+ baseBuilder
+ .setAppAttemptNumber(taskCommunicatorContext.getApplicationAttemptId().getAttemptId());
+ baseBuilder.setTokenIdentifier(getTokenIdentifier());
+
+ BASE_SUBMIT_WORK_REQUEST = baseBuilder.build();
+
+ credentialMap = new ConcurrentHashMap<String, ByteBuffer>();
+ }
+
+ @Override
+ public void serviceInit(Configuration conf) throws Exception {
+ super.serviceInit(conf);
+ int numThreads = conf.getInt(LlapDaemonConfiguration.LLAP_DAEMON_COMMUNICATOR_NUM_THREADS,
+ LlapDaemonConfiguration.LLAP_DAEMON_COMMUNICATOR_NUM_THREADS_DEFAULT);
+ this.communicator = new TaskCommunicator(numThreads);
+ this.communicator.init(conf);
+ }
+
+ @Override
+ public void serviceStart() {
+ super.serviceStart();
+ this.communicator.start();
+ }
+
+ @Override
+ public void serviceStop() {
+ super.serviceStop();
+ if (this.communicator != null) {
+ this.communicator.stop();
+ }
+ }
+
+
+ @Override
+ public void registerRunningContainer(ContainerId containerId, String hostname, int port) {
+ super.registerRunningContainer(containerId, hostname, port);
+ }
+
+ @Override
+ public void registerContainerEnd(ContainerId containerId) {
+ super.registerContainerEnd(containerId);
+ }
+
+ @Override
+ public void registerRunningTaskAttempt(final ContainerId containerId, final TaskSpec taskSpec,
+ Map<String, LocalResource> additionalResources,
+ Credentials credentials,
+ boolean credentialsChanged) {
+ super.registerRunningTaskAttempt(containerId, taskSpec, additionalResources, credentials,
+ credentialsChanged);
+ SubmitWorkRequestProto requestProto = null;
+ try {
+ requestProto = constructSubmitWorkRequest(containerId, taskSpec);
+ } catch (IOException e) {
+ throw new RuntimeException("Failed to construct request", e);
+ }
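+    // Look up the host/port recorded for this container when it was registered as running.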
+ ContainerInfo containerInfo = getContainerInfo(containerId);
+ String host;
+ int port;
+ if (containerInfo != null) {
+ synchronized (containerInfo) {
+ host = containerInfo.host;
+ port = containerInfo.port;
+ }
+ } else {
+ // TODO Handle this properly
+ throw new RuntimeException("ContainerInfo not found for container: " + containerId +
+ ", while trying to launch task: " + taskSpec.getTaskAttemptID());
+ }
+ communicator.submitWork(requestProto, host, port,
+        new TaskCommunicator.ExecuteRequestCallback<SubmitWorkResponseProto>() {
+ @Override
+ public void setResponse(SubmitWorkResponseProto response) {
+ LOG.info("Successfully launched task: " + taskSpec.getTaskAttemptID());
+ getTaskCommunicatorContext()
+ .taskStartedRemotely(taskSpec.getTaskAttemptID(), containerId);
+ }
+
+ @Override
+ public void indicateError(Throwable t) {
+ // TODO Handle this error. This is where an API on the context to indicate failure / rejection comes in.
+ LOG.info("Failed to run task: " + taskSpec.getTaskAttemptID() + " on containerId: " +
+ containerId, t);
+ }
+ });
+ }
+
+ @Override
+ public void unregisterRunningTaskAttempt(TezTaskAttemptID taskAttemptID) {
+ super.unregisterRunningTaskAttempt(taskAttemptID);
+ // Nothing else to do for now. The push API in the test does not support termination of a running task
+ }
+
+ private SubmitWorkRequestProto constructSubmitWorkRequest(ContainerId containerId,
+ TaskSpec taskSpec) throws
+ IOException {
+ SubmitWorkRequestProto.Builder builder =
+ SubmitWorkRequestProto.newBuilder(BASE_SUBMIT_WORK_REQUEST);
+ builder.setContainerIdString(containerId.toString());
+ builder.setAmHost(getAddress().getHostName());
+ builder.setAmPort(getAddress().getPort());
+ Credentials taskCredentials = new Credentials();
+ // Credentials can change across DAGs. Ideally construct only once per DAG.
+ taskCredentials.addAll(getTaskCommunicatorContext().getCredentials());
+
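+    // Serialized credentials are cached per DAG; duplicate() gives this request an independent
+    // buffer position so concurrent submissions do not interfere with each other.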
+ ByteBuffer credentialsBinary = credentialMap.get(taskSpec.getDAGName());
+ if (credentialsBinary == null) {
+ credentialsBinary = serializeCredentials(getTaskCommunicatorContext().getCredentials());
+ credentialMap.putIfAbsent(taskSpec.getDAGName(), credentialsBinary.duplicate());
+ } else {
+ credentialsBinary = credentialsBinary.duplicate();
+ }
+ builder.setCredentialsBinary(ByteString.copyFrom(credentialsBinary));
+ builder.setFragmentSpec(Converters.convertTaskSpecToProto(taskSpec));
+ return builder.build();
+ }
+
+ private ByteBuffer serializeCredentials(Credentials credentials) throws IOException {
+ Credentials containerCredentials = new Credentials();
+ containerCredentials.addAll(credentials);
+ DataOutputBuffer containerTokens_dob = new DataOutputBuffer();
+ containerCredentials.writeTokenStorageToStream(containerTokens_dob);
+ ByteBuffer containerCredentialsBuffer = ByteBuffer.wrap(containerTokens_dob.getData(), 0,
+ containerTokens_dob.getLength());
+ return containerCredentialsBuffer;
+ }
+}
diff --git llap-server/src/java/org/apache/hadoop/hive/llap/tezplugins/TaskCommunicator.java llap-server/src/java/org/apache/hadoop/hive/llap/tezplugins/TaskCommunicator.java
new file mode 100644
index 0000000..1670a48
--- /dev/null
+++ llap-server/src/java/org/apache/hadoop/hive/llap/tezplugins/TaskCommunicator.java
@@ -0,0 +1,111 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.llap.tezplugins;
+
+import java.util.concurrent.Callable;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+
+import com.google.common.util.concurrent.FutureCallback;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
+import com.google.common.util.concurrent.ListeningExecutorService;
+import com.google.common.util.concurrent.MoreExecutors;
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
+import com.google.protobuf.Message;
+import org.apache.hadoop.hive.llap.daemon.LlapDaemonProtocolBlockingPB;
+import org.apache.hadoop.hive.llap.daemon.impl.LlapDaemonProtocolClientImpl;
+import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto;
+import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkResponseProto;
+import org.apache.hadoop.service.AbstractService;
+
+public class TaskCommunicator extends AbstractService {
+
+  private final ConcurrentMap<String, LlapDaemonProtocolBlockingPB> hostProxies;
+ private ListeningExecutorService executor;
+
+ public TaskCommunicator(int numThreads) {
+ super(TaskCommunicator.class.getSimpleName());
+ ExecutorService localExecutor = Executors.newFixedThreadPool(numThreads,
+ new ThreadFactoryBuilder().setNameFormat("TaskCommunicator #%2d").build());
+ this.hostProxies = new ConcurrentHashMap<>();
+ executor = MoreExecutors.listeningDecorator(localExecutor);
+ }
+
+ @Override
+ public void serviceStop() {
+ executor.shutdownNow();
+ }
+
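+  /**
+   * Submits the request to the daemon at host:port on the internal thread pool and relays the
+   * response, or any failure, to the supplied callback.
+   */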
+ public void submitWork(SubmitWorkRequestProto request, String host, int port,
+                         final ExecuteRequestCallback<SubmitWorkResponseProto> callback) {
+    ListenableFuture<SubmitWorkResponseProto> future =
+        executor.submit(new SubmitWorkCallable(request, host, port));
+    Futures.addCallback(future, new FutureCallback<SubmitWorkResponseProto>() {
+ @Override
+ public void onSuccess(SubmitWorkResponseProto result) {
+ callback.setResponse(result);
+ }
+
+ @Override
+ public void onFailure(Throwable t) {
+ callback.indicateError(t);
+ }
+ });
+
+ }
+
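+  // Callable that issues the blocking submitWork RPC against a single daemon.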
+  private class SubmitWorkCallable implements Callable<SubmitWorkResponseProto> {
+ final String hostname;
+ final int port;
+ final SubmitWorkRequestProto request;
+
+ private SubmitWorkCallable(SubmitWorkRequestProto request, String hostname, int port) {
+ this.hostname = hostname;
+ this.port = port;
+ this.request = request;
+ }
+
+ @Override
+ public SubmitWorkResponseProto call() throws Exception {
+ return getProxy(hostname, port).submitWork(null, request);
+ }
+ }
+
+  public interface ExecuteRequestCallback<T extends Message> {
+ void setResponse(T response);
+ void indicateError(Throwable t);
+ }
+
+ private LlapDaemonProtocolBlockingPB getProxy(String hostname, int port) {
+ String hostId = getHostIdentifier(hostname, port);
+
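+    // Reuse a cached proxy for this host:port if one exists; otherwise create one, and fall back
+    // to whichever instance wins the putIfAbsent race.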
+ LlapDaemonProtocolBlockingPB proxy = hostProxies.get(hostId);
+ if (proxy == null) {
+ proxy = new LlapDaemonProtocolClientImpl(getConfig(), hostname, port);
+ LlapDaemonProtocolBlockingPB proxyOld = hostProxies.putIfAbsent(hostId, proxy);
+ if (proxyOld != null) {
+ // TODO Shutdown the new proxy.
+ proxy = proxyOld;
+ }
+ }
+ return proxy;
+ }
+
+ private String getHostIdentifier(String hostname, int port) {
+ return hostname + ":" + port;
+ }
+}
diff --git llap-server/src/java/org/apache/tez/dag/app/launcher/DaemonContainerLauncher.java llap-server/src/java/org/apache/tez/dag/app/launcher/DaemonContainerLauncher.java
deleted file mode 100644
index b883058..0000000
--- llap-server/src/java/org/apache/tez/dag/app/launcher/DaemonContainerLauncher.java
+++ /dev/null
@@ -1,204 +0,0 @@
-/*
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.tez.dag.app.launcher;
-
-import java.net.InetSocketAddress;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.concurrent.Callable;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-
-import com.google.common.util.concurrent.FutureCallback;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.ListeningExecutorService;
-import com.google.common.util.concurrent.MoreExecutors;
-import com.google.common.util.concurrent.ThreadFactoryBuilder;
-import com.google.protobuf.ByteString;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.service.AbstractService;
-import org.apache.hadoop.yarn.api.ApplicationConstants;
-import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
-import org.apache.hadoop.yarn.api.records.ContainerId;
-import org.apache.hadoop.yarn.util.Clock;
-import org.apache.hadoop.hive.llap.daemon.LlapDaemonConfiguration;
-import org.apache.hadoop.hive.llap.daemon.LlapDaemonProtocolBlockingPB;
-import org.apache.hadoop.hive.llap.daemon.impl.LlapDaemonProtocolClientImpl;
-import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RunContainerRequestProto;
-import org.apache.tez.dag.app.AppContext;
-import org.apache.tez.dag.app.TaskAttemptListener;
-import org.apache.tez.dag.app.rm.NMCommunicatorEvent;
-import org.apache.tez.dag.app.rm.NMCommunicatorLaunchRequestEvent;
-import org.apache.tez.dag.app.rm.container.AMContainerEvent;
-import org.apache.tez.dag.app.rm.container.AMContainerEventLaunchFailed;
-import org.apache.tez.dag.app.rm.container.AMContainerEventLaunched;
-import org.apache.tez.dag.app.rm.container.AMContainerEventType;
-import org.apache.tez.dag.history.DAGHistoryEvent;
-import org.apache.tez.dag.history.events.ContainerLaunchedEvent;
-
-public class DaemonContainerLauncher extends AbstractService implements ContainerLauncher {
-
- // TODO Support interruptability of tasks which haven't yet been launched.
-
- // TODO May need multiple connections per target machine, depending upon how synchronization is handled in the RPC layer
-
- static final Log LOG = LogFactory.getLog(DaemonContainerLauncher.class);
-
- private final AppContext context;
- private final ListeningExecutorService executor;
- private final String tokenIdentifier;
- private final TaskAttemptListener tal;
-  private final Map<String, LlapDaemonProtocolBlockingPB> proxyMap;
- private final int servicePort;
- private final ApplicationAttemptId appAttemptId;
- private final Clock clock;
-
-
- // Configuration passed in here to set up final parameters
- public DaemonContainerLauncher(AppContext appContext, Configuration conf,
- TaskAttemptListener tal) {
- super(DaemonContainerLauncher.class.getName());
- this.clock = appContext.getClock();
- // TODO Scale this based on numDaemons / threads per daemon
- int numThreads = conf.getInt(LlapDaemonConfiguration.LLAP_DAEMON_AM_COMMUNICATOR_NUM_THREADS,
- LlapDaemonConfiguration.LLAP_DAEMON_AM_COMMUNICATOR_NUM_THREADS_DEFAULT);
- this.servicePort = conf.getInt(LlapDaemonConfiguration.LLAP_DAEMON_RPC_PORT,
- LlapDaemonConfiguration.LLAP_DAEMON_RPC_PORT_DEFAULT);
- ExecutorService localExecutor = Executors.newFixedThreadPool(numThreads,
- new ThreadFactoryBuilder().setNameFormat("DaemonCommunicator #%2d").build());
- executor = MoreExecutors.listeningDecorator(localExecutor);
- this.appAttemptId = appContext.getApplicationAttemptId();
- this.context = appContext;
- this.tokenIdentifier = context.getApplicationID().toString();
- this.tal = tal;
-    this.proxyMap = new HashMap<String, LlapDaemonProtocolBlockingPB>();
- }
-
- public void serviceStop() {
- executor.shutdownNow();
- }
-
- private synchronized LlapDaemonProtocolBlockingPB getProxy(String hostname) {
- LlapDaemonProtocolBlockingPB proxy = proxyMap.get(hostname);
- if (proxy == null) {
- proxy = new LlapDaemonProtocolClientImpl(getConfig(), hostname, servicePort);
- proxyMap.put(hostname, proxy);
- }
- return proxy;
- }
-
- @Override
- public void handle(NMCommunicatorEvent event) {
- switch (event.getType()) {
- case CONTAINER_LAUNCH_REQUEST:
- NMCommunicatorLaunchRequestEvent launchEvent = (NMCommunicatorLaunchRequestEvent) event;
- InetSocketAddress address = tal.getTaskCommunicator(launchEvent.getTaskCommId()).getAddress();
-      ListenableFuture<Void> future = executor.submit(
- new SubmitCallable(getProxy(launchEvent.getNodeId().getHost()), launchEvent,
- tokenIdentifier, appAttemptId, address.getHostName(), address.getPort()));
- Futures.addCallback(future, new SubmitCallback(launchEvent.getContainerId(),
- launchEvent.getContainer().getNodeId().getHost()));
- break;
- case CONTAINER_STOP_REQUEST:
- LOG.info("DEBUG: Ignoring STOP_REQUEST for event: " + event);
- // TODO should this be sending out a Container terminated message ? Noone tells AMContainer
- // that the container is actually done (normally received from RM)
- // TODO Sending this out for an unlaunched container is invalid
- context.getEventHandler().handle(new AMContainerEvent(event.getContainerId(),
- AMContainerEventType.C_NM_STOP_SENT));
- break;
- }
- }
-
-
-  private static class SubmitCallable implements Callable<Void> {
-
- private final NMCommunicatorLaunchRequestEvent event;
- private final String tokenIdentifier;
- private final String amHost;
- private final int amPort;
- private final LlapDaemonProtocolBlockingPB daemonProxy;
- private final ApplicationAttemptId appAttemptId;
-
- private SubmitCallable(LlapDaemonProtocolBlockingPB daemonProxy,
- NMCommunicatorLaunchRequestEvent event, String tokenIdentifier,
- ApplicationAttemptId appAttemptId,
- String amHost, int amPort) {
- this.event = event;
- this.daemonProxy = daemonProxy;
- this.tokenIdentifier = tokenIdentifier;
- this.appAttemptId = appAttemptId;
- this.amHost = amHost;
- this.amPort = amPort;
- }
-
-
- @Override
- public Void call() throws Exception {
- RunContainerRequestProto.Builder requestBuilder = RunContainerRequestProto.newBuilder();
- // Need the taskAttemptListenerAddress
- requestBuilder.setAmHost(amHost).setAmPort(amPort);
- requestBuilder.setAppAttemptNumber(appAttemptId.getAttemptId());
- requestBuilder.setApplicationIdString(appAttemptId.getApplicationId().toString());
- requestBuilder.setTokenIdentifier(tokenIdentifier);
- requestBuilder.setContainerIdString(event.getContainer().getId().toString());
- requestBuilder.setCredentialsBinary(
- ByteString.copyFrom(event.getContainerLaunchContext().getTokens()));
- requestBuilder.setUser(System.getenv(ApplicationConstants.Environment.USER.name()));
-
- RunContainerRequestProto request = requestBuilder.build();
- daemonProxy.runContainer(null, request);
- return null;
- }
- }
-
-  private class SubmitCallback implements FutureCallback<Void> {
-
- private final ContainerId containerId;
- private final String host;
-
- private SubmitCallback(ContainerId containerId, String host) {
- this.containerId = containerId;
- this.host = host;
- }
-
- @Override
- public void onSuccess(Void result) {
- LOG.info("Container: " + containerId + " launch succeeded on host: " + host);
- context.getEventHandler().handle(new AMContainerEventLaunched(containerId));
- ContainerLaunchedEvent lEvt = new ContainerLaunchedEvent(
- containerId, clock.getTime(), context.getApplicationAttemptId());
- context.getHistoryHandler().handle(new DAGHistoryEvent(
- null, lEvt));
- }
-
- @Override
- public void onFailure(Throwable t) {
- LOG.error("Failed to launch container: " + containerId + " on host: " + host, t);
- sendContainerLaunchFailedMsg(containerId, t);
-
- }
- }
-
- @SuppressWarnings("unchecked")
- void sendContainerLaunchFailedMsg(ContainerId containerId, Throwable t) {
- context.getEventHandler().handle(new AMContainerEventLaunchFailed(containerId, t == null ? "" : t.getMessage()));
- }
-
-
-}
diff --git llap-server/src/java/org/apache/tez/dag/app/rm/DaemonTaskSchedulerService.java llap-server/src/java/org/apache/tez/dag/app/rm/DaemonTaskSchedulerService.java
deleted file mode 100644
index 34c04e7..0000000
--- llap-server/src/java/org/apache/tez/dag/app/rm/DaemonTaskSchedulerService.java
+++ /dev/null
@@ -1,334 +0,0 @@
-/*
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.tez.dag.app.rm;
-
-import java.io.IOException;
-import java.util.Arrays;
-import java.util.HashSet;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Random;
-import java.util.Set;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ConcurrentMap;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicInteger;
-
-import com.google.common.primitives.Ints;
-import com.google.common.util.concurrent.ThreadFactoryBuilder;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
-import org.apache.hadoop.yarn.api.records.ApplicationId;
-import org.apache.hadoop.yarn.api.records.Container;
-import org.apache.hadoop.yarn.api.records.ContainerId;
-import org.apache.hadoop.yarn.api.records.NodeId;
-import org.apache.hadoop.yarn.api.records.NodeReport;
-import org.apache.hadoop.yarn.api.records.NodeState;
-import org.apache.hadoop.yarn.api.records.Priority;
-import org.apache.hadoop.yarn.api.records.Resource;
-import org.apache.hadoop.yarn.client.api.YarnClient;
-import org.apache.hadoop.yarn.exceptions.YarnException;
-import org.apache.hadoop.hive.llap.daemon.LlapDaemonConfiguration;
-import org.apache.tez.dag.app.AppContext;
-
-
-public class DaemonTaskSchedulerService extends TaskSchedulerService {
-
- private static final Log LOG = LogFactory.getLog(DaemonTaskSchedulerService.class);
-
- private final ExecutorService appCallbackExecutor;
- private final TaskSchedulerAppCallback appClientDelegate;
- private final AppContext appContext;
-  private final List<String> serviceHosts;
-  private final Set<String> serviceHostSet;
- private final ContainerFactory containerFactory;
- private final Random random = new Random();
-
- private final String clientHostname;
- private final int clientPort;
- private final String trackingUrl;
- private final AtomicBoolean isStopped = new AtomicBoolean(false);
- private final ConcurrentMap