diff --git llap-client/src/java/org/apache/hadoop/hive/llap/configuration/LlapConfiguration.java llap-client/src/java/org/apache/hadoop/hive/llap/configuration/LlapConfiguration.java
index f03c807..f6ec119 100644
--- llap-client/src/java/org/apache/hadoop/hive/llap/configuration/LlapConfiguration.java
+++ llap-client/src/java/org/apache/hadoop/hive/llap/configuration/LlapConfiguration.java
@@ -65,8 +65,14 @@ public LlapConfiguration() {
public static final String LLAP_DAEMON_VCPUS_PER_INSTANCE = LLAP_DAEMON_PREFIX + "vcpus.per.instance";
public static final int LLAP_DAEMON_VCPUS_PER_INSTANCE_DEFAULT = 4;
+ public static final String LLAP_DAEMON_NUM_FILE_CLEANER_THREADS = LLAP_DAEMON_PREFIX + "num.file.cleaner.threads";
+ public static final int LLAP_DAEMON_NUM_FILE_CLEANER_THREADS_DEFAULT = 1;
+
// Section for configs used in the AM //
+ public static final String LLAP_FILE_CLEANUP_DELAY_SECONDS = LLAP_PREFIX + "file.cleanup.delay-seconds";
+ public static final long LLAP_FILE_CLEANUP_DELAY_SECONDS_DEFAULT = 300; // 5 minutes by default
+
public static final String LLAP_DAEMON_SERVICE_HOSTS = LLAP_DAEMON_PREFIX + "service.hosts";
public static final String LLAP_DAEMON_SERVICE_REFRESH_INTERVAL = LLAP_DAEMON_PREFIX + "service.refresh.interval";
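
(For context, not part of the patch: a minimal sketch of how the two new cleanup settings above might be read, assuming LlapConfiguration exposes Hadoop's standard Configuration accessors such as getInt/getLong; the class and variable names below are hypothetical.)

    import org.apache.hadoop.hive.llap.configuration.LlapConfiguration;

    public class FileCleanerConfigExample {
      public static void main(String[] args) {
        // Hypothetical usage sketch; assumes the no-arg constructor shown in the hunk above.
        LlapConfiguration conf = new LlapConfiguration();

        // Number of file-cleaner threads on the daemon (default 1).
        int cleanerThreads = conf.getInt(
            LlapConfiguration.LLAP_DAEMON_NUM_FILE_CLEANER_THREADS,
            LlapConfiguration.LLAP_DAEMON_NUM_FILE_CLEANER_THREADS_DEFAULT);

        // Delay, in seconds, before a completed query's files are deleted (default 300).
        long cleanupDelaySeconds = conf.getLong(
            LlapConfiguration.LLAP_FILE_CLEANUP_DELAY_SECONDS,
            LlapConfiguration.LLAP_FILE_CLEANUP_DELAY_SECONDS_DEFAULT);

        System.out.println("cleaner threads = " + cleanerThreads
            + ", cleanup delay = " + cleanupDelaySeconds + "s");
      }
    }
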
diff --git llap-server/src/gen/protobuf/gen-java/org/apache/hadoop/hive/llap/daemon/rpc/LlapDaemonProtocolProtos.java llap-server/src/gen/protobuf/gen-java/org/apache/hadoop/hive/llap/daemon/rpc/LlapDaemonProtocolProtos.java
index 1500635..8748151 100644
--- llap-server/src/gen/protobuf/gen-java/org/apache/hadoop/hive/llap/daemon/rpc/LlapDaemonProtocolProtos.java
+++ llap-server/src/gen/protobuf/gen-java/org/apache/hadoop/hive/llap/daemon/rpc/LlapDaemonProtocolProtos.java
@@ -9622,6 +9622,2555 @@ public Builder mergeFrom(
// @@protoc_insertion_point(class_scope:SourceStateUpdatedResponseProto)
}
+ public interface QueryCompleteRequestProtoOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // optional string query_id = 1;
+ /**
+ * optional string query_id = 1;
+ */
+ boolean hasQueryId();
+ /**
+ * optional string query_id = 1;
+ */
+ java.lang.String getQueryId();
+ /**
+ * optional string query_id = 1;
+ */
+ com.google.protobuf.ByteString
+ getQueryIdBytes();
+
+ // optional string dag_name = 2;
+ /**
+ * optional string dag_name = 2;
+ */
+ boolean hasDagName();
+ /**
+ * optional string dag_name = 2;
+ */
+ java.lang.String getDagName();
+ /**
+ * optional string dag_name = 2;
+ */
+ com.google.protobuf.ByteString
+ getDagNameBytes();
+
+ // optional int64 delete_delay = 3 [default = 0];
+ /**
+ * optional int64 delete_delay = 3 [default = 0];
+ */
+ boolean hasDeleteDelay();
+ /**
+ * optional int64 delete_delay = 3 [default = 0];
+ */
+ long getDeleteDelay();
+ }
+ /**
+ * Protobuf type {@code QueryCompleteRequestProto}
+ */
+ public static final class QueryCompleteRequestProto extends
+ com.google.protobuf.GeneratedMessage
+ implements QueryCompleteRequestProtoOrBuilder {
+ // Use QueryCompleteRequestProto.newBuilder() to construct.
+ private QueryCompleteRequestProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private QueryCompleteRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final QueryCompleteRequestProto defaultInstance;
+ public static QueryCompleteRequestProto getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public QueryCompleteRequestProto getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private QueryCompleteRequestProto(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 10: {
+ bitField0_ |= 0x00000001;
+ queryId_ = input.readBytes();
+ break;
+ }
+ case 18: {
+ bitField0_ |= 0x00000002;
+ dagName_ = input.readBytes();
+ break;
+ }
+ case 24: {
+ bitField0_ |= 0x00000004;
+ deleteDelay_ = input.readInt64();
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_QueryCompleteRequestProto_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_QueryCompleteRequestProto_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteRequestProto.class, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteRequestProto.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser<QueryCompleteRequestProto> PARSER =
+ new com.google.protobuf.AbstractParser<QueryCompleteRequestProto>() {
+ public QueryCompleteRequestProto parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new QueryCompleteRequestProto(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser<QueryCompleteRequestProto> getParserForType() {
+ return PARSER;
+ }
+
+ private int bitField0_;
+ // optional string query_id = 1;
+ public static final int QUERY_ID_FIELD_NUMBER = 1;
+ private java.lang.Object queryId_;
+ /**
+ * optional string query_id = 1;
+ */
+ public boolean hasQueryId() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * optional string query_id = 1;
+ */
+ public java.lang.String getQueryId() {
+ java.lang.Object ref = queryId_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
+ queryId_ = s;
+ }
+ return s;
+ }
+ }
+ /**
+ * optional string query_id = 1;
+ */
+ public com.google.protobuf.ByteString
+ getQueryIdBytes() {
+ java.lang.Object ref = queryId_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ queryId_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ // optional string dag_name = 2;
+ public static final int DAG_NAME_FIELD_NUMBER = 2;
+ private java.lang.Object dagName_;
+ /**
+ * optional string dag_name = 2;
+ */
+ public boolean hasDagName() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * optional string dag_name = 2;
+ */
+ public java.lang.String getDagName() {
+ java.lang.Object ref = dagName_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
+ dagName_ = s;
+ }
+ return s;
+ }
+ }
+ /**
+ * optional string dag_name = 2;
+ */
+ public com.google.protobuf.ByteString
+ getDagNameBytes() {
+ java.lang.Object ref = dagName_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ dagName_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ // optional int64 delete_delay = 3 [default = 0];
+ public static final int DELETE_DELAY_FIELD_NUMBER = 3;
+ private long deleteDelay_;
+ /**
+ * optional int64 delete_delay = 3 [default = 0];
+ */
+ public boolean hasDeleteDelay() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ /**
+ * optional int64 delete_delay = 3 [default = 0];
+ */
+ public long getDeleteDelay() {
+ return deleteDelay_;
+ }
+
+ private void initFields() {
+ queryId_ = "";
+ dagName_ = "";
+ deleteDelay_ = 0L;
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeBytes(1, getQueryIdBytes());
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeBytes(2, getDagNameBytes());
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ output.writeInt64(3, deleteDelay_);
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(1, getQueryIdBytes());
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(2, getDagNameBytes());
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeInt64Size(3, deleteDelay_);
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteRequestProto)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteRequestProto other = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteRequestProto) obj;
+
+ boolean result = true;
+ result = result && (hasQueryId() == other.hasQueryId());
+ if (hasQueryId()) {
+ result = result && getQueryId()
+ .equals(other.getQueryId());
+ }
+ result = result && (hasDagName() == other.hasDagName());
+ if (hasDagName()) {
+ result = result && getDagName()
+ .equals(other.getDagName());
+ }
+ result = result && (hasDeleteDelay() == other.hasDeleteDelay());
+ if (hasDeleteDelay()) {
+ result = result && (getDeleteDelay()
+ == other.getDeleteDelay());
+ }
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ private int memoizedHashCode = 0;
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (hasQueryId()) {
+ hash = (37 * hash) + QUERY_ID_FIELD_NUMBER;
+ hash = (53 * hash) + getQueryId().hashCode();
+ }
+ if (hasDagName()) {
+ hash = (37 * hash) + DAG_NAME_FIELD_NUMBER;
+ hash = (53 * hash) + getDagName().hashCode();
+ }
+ if (hasDeleteDelay()) {
+ hash = (37 * hash) + DELETE_DELAY_FIELD_NUMBER;
+ hash = (53 * hash) + hashLong(getDeleteDelay());
+ }
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteRequestProto parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteRequestProto parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteRequestProto parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteRequestProto parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteRequestProto parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteRequestProto parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteRequestProto parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteRequestProto parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteRequestProto parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteRequestProto parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteRequestProto prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code QueryCompleteRequestProto}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteRequestProtoOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_QueryCompleteRequestProto_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_QueryCompleteRequestProto_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteRequestProto.class, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteRequestProto.Builder.class);
+ }
+
+ // Construct using org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteRequestProto.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ queryId_ = "";
+ bitField0_ = (bitField0_ & ~0x00000001);
+ dagName_ = "";
+ bitField0_ = (bitField0_ & ~0x00000002);
+ deleteDelay_ = 0L;
+ bitField0_ = (bitField0_ & ~0x00000004);
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_QueryCompleteRequestProto_descriptor;
+ }
+
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteRequestProto getDefaultInstanceForType() {
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteRequestProto.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteRequestProto build() {
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteRequestProto result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteRequestProto buildPartial() {
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteRequestProto result = new org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteRequestProto(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ result.queryId_ = queryId_;
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+ to_bitField0_ |= 0x00000002;
+ }
+ result.dagName_ = dagName_;
+ if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
+ to_bitField0_ |= 0x00000004;
+ }
+ result.deleteDelay_ = deleteDelay_;
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteRequestProto) {
+ return mergeFrom((org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteRequestProto)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteRequestProto other) {
+ if (other == org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteRequestProto.getDefaultInstance()) return this;
+ if (other.hasQueryId()) {
+ bitField0_ |= 0x00000001;
+ queryId_ = other.queryId_;
+ onChanged();
+ }
+ if (other.hasDagName()) {
+ bitField0_ |= 0x00000002;
+ dagName_ = other.dagName_;
+ onChanged();
+ }
+ if (other.hasDeleteDelay()) {
+ setDeleteDelay(other.getDeleteDelay());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteRequestProto parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteRequestProto) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ private int bitField0_;
+
+ // optional string query_id = 1;
+ private java.lang.Object queryId_ = "";
+ /**
+ * optional string query_id = 1;
+ */
+ public boolean hasQueryId() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * optional string query_id = 1;
+ */
+ public java.lang.String getQueryId() {
+ java.lang.Object ref = queryId_;
+ if (!(ref instanceof java.lang.String)) {
+ java.lang.String s = ((com.google.protobuf.ByteString) ref)
+ .toStringUtf8();
+ queryId_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * optional string query_id = 1;
+ */
+ public com.google.protobuf.ByteString
+ getQueryIdBytes() {
+ java.lang.Object ref = queryId_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ queryId_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * optional string query_id = 1;
+ */
+ public Builder setQueryId(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ queryId_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * optional string query_id = 1;
+ */
+ public Builder clearQueryId() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ queryId_ = getDefaultInstance().getQueryId();
+ onChanged();
+ return this;
+ }
+ /**
+ * optional string query_id = 1;
+ */
+ public Builder setQueryIdBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ queryId_ = value;
+ onChanged();
+ return this;
+ }
+
+ // optional string dag_name = 2;
+ private java.lang.Object dagName_ = "";
+ /**
+ * optional string dag_name = 2;
+ */
+ public boolean hasDagName() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * optional string dag_name = 2;
+ */
+ public java.lang.String getDagName() {
+ java.lang.Object ref = dagName_;
+ if (!(ref instanceof java.lang.String)) {
+ java.lang.String s = ((com.google.protobuf.ByteString) ref)
+ .toStringUtf8();
+ dagName_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * optional string dag_name = 2;
+ */
+ public com.google.protobuf.ByteString
+ getDagNameBytes() {
+ java.lang.Object ref = dagName_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ dagName_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * optional string dag_name = 2;
+ */
+ public Builder setDagName(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000002;
+ dagName_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * optional string dag_name = 2;
+ */
+ public Builder clearDagName() {
+ bitField0_ = (bitField0_ & ~0x00000002);
+ dagName_ = getDefaultInstance().getDagName();
+ onChanged();
+ return this;
+ }
+ /**
+ * optional string dag_name = 2;
+ */
+ public Builder setDagNameBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000002;
+ dagName_ = value;
+ onChanged();
+ return this;
+ }
+
+ // optional int64 delete_delay = 3 [default = 0];
+ private long deleteDelay_ ;
+ /**
+ * optional int64 delete_delay = 3 [default = 0];
+ */
+ public boolean hasDeleteDelay() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ /**
+ * optional int64 delete_delay = 3 [default = 0];
+ */
+ public long getDeleteDelay() {
+ return deleteDelay_;
+ }
+ /**
+ * optional int64 delete_delay = 3 [default = 0];
+ */
+ public Builder setDeleteDelay(long value) {
+ bitField0_ |= 0x00000004;
+ deleteDelay_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * optional int64 delete_delay = 3 [default = 0];
+ */
+ public Builder clearDeleteDelay() {
+ bitField0_ = (bitField0_ & ~0x00000004);
+ deleteDelay_ = 0L;
+ onChanged();
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:QueryCompleteRequestProto)
+ }
+
+ static {
+ defaultInstance = new QueryCompleteRequestProto(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:QueryCompleteRequestProto)
+ }
+
+ public interface QueryCompleteResponseProtoOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+ }
+ /**
+ * Protobuf type {@code QueryCompleteResponseProto}
+ */
+ public static final class QueryCompleteResponseProto extends
+ com.google.protobuf.GeneratedMessage
+ implements QueryCompleteResponseProtoOrBuilder {
+ // Use QueryCompleteResponseProto.newBuilder() to construct.
+ private QueryCompleteResponseProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private QueryCompleteResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final QueryCompleteResponseProto defaultInstance;
+ public static QueryCompleteResponseProto getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public QueryCompleteResponseProto getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private QueryCompleteResponseProto(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_QueryCompleteResponseProto_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_QueryCompleteResponseProto_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteResponseProto.class, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteResponseProto.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser<QueryCompleteResponseProto> PARSER =
+ new com.google.protobuf.AbstractParser<QueryCompleteResponseProto>() {
+ public QueryCompleteResponseProto parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new QueryCompleteResponseProto(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser<QueryCompleteResponseProto> getParserForType() {
+ return PARSER;
+ }
+
+ private void initFields() {
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteResponseProto)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteResponseProto other = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteResponseProto) obj;
+
+ boolean result = true;
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ private int memoizedHashCode = 0;
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteResponseProto parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteResponseProto parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteResponseProto parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteResponseProto parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteResponseProto parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteResponseProto parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteResponseProto parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteResponseProto parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteResponseProto parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteResponseProto parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteResponseProto prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code QueryCompleteResponseProto}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteResponseProtoOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_QueryCompleteResponseProto_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_QueryCompleteResponseProto_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteResponseProto.class, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteResponseProto.Builder.class);
+ }
+
+ // Construct using org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteResponseProto.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_QueryCompleteResponseProto_descriptor;
+ }
+
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteResponseProto getDefaultInstanceForType() {
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteResponseProto.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteResponseProto build() {
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteResponseProto result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteResponseProto buildPartial() {
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteResponseProto result = new org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteResponseProto(this);
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteResponseProto) {
+ return mergeFrom((org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteResponseProto)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteResponseProto other) {
+ if (other == org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteResponseProto.getDefaultInstance()) return this;
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteResponseProto parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteResponseProto) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:QueryCompleteResponseProto)
+ }
+
+ static {
+ defaultInstance = new QueryCompleteResponseProto(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:QueryCompleteResponseProto)
+ }
+
+ public interface TerminateFragmentRequestProtoOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // optional string query_id = 1;
+ /**
+ * optional string query_id = 1;
+ */
+ boolean hasQueryId();
+ /**
+ * optional string query_id = 1;
+ */
+ java.lang.String getQueryId();
+ /**
+ * optional string query_id = 1;
+ */
+ com.google.protobuf.ByteString
+ getQueryIdBytes();
+
+ // optional string dag_name = 2;
+ /**
+ * optional string dag_name = 2;
+ */
+ boolean hasDagName();
+ /**
+ * optional string dag_name = 2;
+ */
+ java.lang.String getDagName();
+ /**
+ * optional string dag_name = 2;
+ */
+ com.google.protobuf.ByteString
+ getDagNameBytes();
+
+ // optional int32 dag_attempt_number = 3;
+ /**
+ * optional int32 dag_attempt_number = 3;
+ */
+ boolean hasDagAttemptNumber();
+ /**
+ * optional int32 dag_attempt_number = 3;
+ */
+ int getDagAttemptNumber();
+
+ // optional string vertex_name = 4;
+ /**
+ * optional string vertex_name = 4;
+ */
+ boolean hasVertexName();
+ /**
+ * optional string vertex_name = 4;
+ */
+ java.lang.String getVertexName();
+ /**
+ * optional string vertex_name = 4;
+ */
+ com.google.protobuf.ByteString
+ getVertexNameBytes();
+
+ // optional int32 fragment_number = 5;
+ /**
+ * optional int32 fragment_number = 5;
+ */
+ boolean hasFragmentNumber();
+ /**
+ * optional int32 fragment_number = 5;
+ */
+ int getFragmentNumber();
+
+ // optional int32 attempt_number = 6;
+ /**
+ * optional int32 attempt_number = 6;
+ */
+ boolean hasAttemptNumber();
+ /**
+ * optional int32 attempt_number = 6;
+ */
+ int getAttemptNumber();
+ }
+ /**
+ * Protobuf type {@code TerminateFragmentRequestProto}
+ */
+ public static final class TerminateFragmentRequestProto extends
+ com.google.protobuf.GeneratedMessage
+ implements TerminateFragmentRequestProtoOrBuilder {
+ // Use TerminateFragmentRequestProto.newBuilder() to construct.
+ private TerminateFragmentRequestProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private TerminateFragmentRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final TerminateFragmentRequestProto defaultInstance;
+ public static TerminateFragmentRequestProto getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public TerminateFragmentRequestProto getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private TerminateFragmentRequestProto(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 10: {
+ bitField0_ |= 0x00000001;
+ queryId_ = input.readBytes();
+ break;
+ }
+ case 18: {
+ bitField0_ |= 0x00000002;
+ dagName_ = input.readBytes();
+ break;
+ }
+ case 24: {
+ bitField0_ |= 0x00000004;
+ dagAttemptNumber_ = input.readInt32();
+ break;
+ }
+ case 34: {
+ bitField0_ |= 0x00000008;
+ vertexName_ = input.readBytes();
+ break;
+ }
+ case 40: {
+ bitField0_ |= 0x00000010;
+ fragmentNumber_ = input.readInt32();
+ break;
+ }
+ case 48: {
+ bitField0_ |= 0x00000020;
+ attemptNumber_ = input.readInt32();
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_TerminateFragmentRequestProto_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_TerminateFragmentRequestProto_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto.class, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser<TerminateFragmentRequestProto> PARSER =
+ new com.google.protobuf.AbstractParser<TerminateFragmentRequestProto>() {
+ public TerminateFragmentRequestProto parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new TerminateFragmentRequestProto(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser<TerminateFragmentRequestProto> getParserForType() {
+ return PARSER;
+ }
+
+ private int bitField0_;
+ // optional string query_id = 1;
+ public static final int QUERY_ID_FIELD_NUMBER = 1;
+ private java.lang.Object queryId_;
+ /**
+ * optional string query_id = 1;
+ */
+ public boolean hasQueryId() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * optional string query_id = 1;
+ */
+ public java.lang.String getQueryId() {
+ java.lang.Object ref = queryId_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
+ queryId_ = s;
+ }
+ return s;
+ }
+ }
+ /**
+ * optional string query_id = 1;
+ */
+ public com.google.protobuf.ByteString
+ getQueryIdBytes() {
+ java.lang.Object ref = queryId_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ queryId_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ // optional string dag_name = 2;
+ public static final int DAG_NAME_FIELD_NUMBER = 2;
+ private java.lang.Object dagName_;
+ /**
+ * optional string dag_name = 2;
+ */
+ public boolean hasDagName() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * optional string dag_name = 2;
+ */
+ public java.lang.String getDagName() {
+ java.lang.Object ref = dagName_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
+ dagName_ = s;
+ }
+ return s;
+ }
+ }
+ /**
+ * optional string dag_name = 2;
+ */
+ public com.google.protobuf.ByteString
+ getDagNameBytes() {
+ java.lang.Object ref = dagName_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ dagName_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ // optional int32 dag_attempt_number = 3;
+ public static final int DAG_ATTEMPT_NUMBER_FIELD_NUMBER = 3;
+ private int dagAttemptNumber_;
+ /**
+ * optional int32 dag_attempt_number = 3;
+ */
+ public boolean hasDagAttemptNumber() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ /**
+ * optional int32 dag_attempt_number = 3;
+ */
+ public int getDagAttemptNumber() {
+ return dagAttemptNumber_;
+ }
+
+ // optional string vertex_name = 4;
+ public static final int VERTEX_NAME_FIELD_NUMBER = 4;
+ private java.lang.Object vertexName_;
+ /**
+ * optional string vertex_name = 4;
+ */
+ public boolean hasVertexName() {
+ return ((bitField0_ & 0x00000008) == 0x00000008);
+ }
+ /**
+ * optional string vertex_name = 4;
+ */
+ public java.lang.String getVertexName() {
+ java.lang.Object ref = vertexName_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
+ vertexName_ = s;
+ }
+ return s;
+ }
+ }
+ /**
+ * optional string vertex_name = 4;
+ */
+ public com.google.protobuf.ByteString
+ getVertexNameBytes() {
+ java.lang.Object ref = vertexName_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ vertexName_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ // optional int32 fragment_number = 5;
+ public static final int FRAGMENT_NUMBER_FIELD_NUMBER = 5;
+ private int fragmentNumber_;
+ /**
+ * optional int32 fragment_number = 5;
+ */
+ public boolean hasFragmentNumber() {
+ return ((bitField0_ & 0x00000010) == 0x00000010);
+ }
+ /**
+ * optional int32 fragment_number = 5;
+ */
+ public int getFragmentNumber() {
+ return fragmentNumber_;
+ }
+
+ // optional int32 attempt_number = 6;
+ public static final int ATTEMPT_NUMBER_FIELD_NUMBER = 6;
+ private int attemptNumber_;
+ /**
+ * optional int32 attempt_number = 6;
+ */
+ public boolean hasAttemptNumber() {
+ return ((bitField0_ & 0x00000020) == 0x00000020);
+ }
+ /**
+ * optional int32 attempt_number = 6;
+ */
+ public int getAttemptNumber() {
+ return attemptNumber_;
+ }
+
+ private void initFields() {
+ queryId_ = "";
+ dagName_ = "";
+ dagAttemptNumber_ = 0;
+ vertexName_ = "";
+ fragmentNumber_ = 0;
+ attemptNumber_ = 0;
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeBytes(1, getQueryIdBytes());
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeBytes(2, getDagNameBytes());
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ output.writeInt32(3, dagAttemptNumber_);
+ }
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
+ output.writeBytes(4, getVertexNameBytes());
+ }
+ if (((bitField0_ & 0x00000010) == 0x00000010)) {
+ output.writeInt32(5, fragmentNumber_);
+ }
+ if (((bitField0_ & 0x00000020) == 0x00000020)) {
+ output.writeInt32(6, attemptNumber_);
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(1, getQueryIdBytes());
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(2, getDagNameBytes());
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeInt32Size(3, dagAttemptNumber_);
+ }
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(4, getVertexNameBytes());
+ }
+ if (((bitField0_ & 0x00000010) == 0x00000010)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeInt32Size(5, fragmentNumber_);
+ }
+ if (((bitField0_ & 0x00000020) == 0x00000020)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeInt32Size(6, attemptNumber_);
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto other = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto) obj;
+
+ boolean result = true;
+ result = result && (hasQueryId() == other.hasQueryId());
+ if (hasQueryId()) {
+ result = result && getQueryId()
+ .equals(other.getQueryId());
+ }
+ result = result && (hasDagName() == other.hasDagName());
+ if (hasDagName()) {
+ result = result && getDagName()
+ .equals(other.getDagName());
+ }
+ result = result && (hasDagAttemptNumber() == other.hasDagAttemptNumber());
+ if (hasDagAttemptNumber()) {
+ result = result && (getDagAttemptNumber()
+ == other.getDagAttemptNumber());
+ }
+ result = result && (hasVertexName() == other.hasVertexName());
+ if (hasVertexName()) {
+ result = result && getVertexName()
+ .equals(other.getVertexName());
+ }
+ result = result && (hasFragmentNumber() == other.hasFragmentNumber());
+ if (hasFragmentNumber()) {
+ result = result && (getFragmentNumber()
+ == other.getFragmentNumber());
+ }
+ result = result && (hasAttemptNumber() == other.hasAttemptNumber());
+ if (hasAttemptNumber()) {
+ result = result && (getAttemptNumber()
+ == other.getAttemptNumber());
+ }
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ private int memoizedHashCode = 0;
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (hasQueryId()) {
+ hash = (37 * hash) + QUERY_ID_FIELD_NUMBER;
+ hash = (53 * hash) + getQueryId().hashCode();
+ }
+ if (hasDagName()) {
+ hash = (37 * hash) + DAG_NAME_FIELD_NUMBER;
+ hash = (53 * hash) + getDagName().hashCode();
+ }
+ if (hasDagAttemptNumber()) {
+ hash = (37 * hash) + DAG_ATTEMPT_NUMBER_FIELD_NUMBER;
+ hash = (53 * hash) + getDagAttemptNumber();
+ }
+ if (hasVertexName()) {
+ hash = (37 * hash) + VERTEX_NAME_FIELD_NUMBER;
+ hash = (53 * hash) + getVertexName().hashCode();
+ }
+ if (hasFragmentNumber()) {
+ hash = (37 * hash) + FRAGMENT_NUMBER_FIELD_NUMBER;
+ hash = (53 * hash) + getFragmentNumber();
+ }
+ if (hasAttemptNumber()) {
+ hash = (37 * hash) + ATTEMPT_NUMBER_FIELD_NUMBER;
+ hash = (53 * hash) + getAttemptNumber();
+ }
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code TerminateFragmentRequestProto}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProtoOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_TerminateFragmentRequestProto_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_TerminateFragmentRequestProto_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto.class, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto.Builder.class);
+ }
+
+ // Construct using org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ queryId_ = "";
+ bitField0_ = (bitField0_ & ~0x00000001);
+ dagName_ = "";
+ bitField0_ = (bitField0_ & ~0x00000002);
+ dagAttemptNumber_ = 0;
+ bitField0_ = (bitField0_ & ~0x00000004);
+ vertexName_ = "";
+ bitField0_ = (bitField0_ & ~0x00000008);
+ fragmentNumber_ = 0;
+ bitField0_ = (bitField0_ & ~0x00000010);
+ attemptNumber_ = 0;
+ bitField0_ = (bitField0_ & ~0x00000020);
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_TerminateFragmentRequestProto_descriptor;
+ }
+
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto getDefaultInstanceForType() {
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto build() {
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto buildPartial() {
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto result = new org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ result.queryId_ = queryId_;
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+ to_bitField0_ |= 0x00000002;
+ }
+ result.dagName_ = dagName_;
+ if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
+ to_bitField0_ |= 0x00000004;
+ }
+ result.dagAttemptNumber_ = dagAttemptNumber_;
+ if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
+ to_bitField0_ |= 0x00000008;
+ }
+ result.vertexName_ = vertexName_;
+ if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
+ to_bitField0_ |= 0x00000010;
+ }
+ result.fragmentNumber_ = fragmentNumber_;
+ if (((from_bitField0_ & 0x00000020) == 0x00000020)) {
+ to_bitField0_ |= 0x00000020;
+ }
+ result.attemptNumber_ = attemptNumber_;
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto) {
+ return mergeFrom((org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto other) {
+ if (other == org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto.getDefaultInstance()) return this;
+ if (other.hasQueryId()) {
+ bitField0_ |= 0x00000001;
+ queryId_ = other.queryId_;
+ onChanged();
+ }
+ if (other.hasDagName()) {
+ bitField0_ |= 0x00000002;
+ dagName_ = other.dagName_;
+ onChanged();
+ }
+ if (other.hasDagAttemptNumber()) {
+ setDagAttemptNumber(other.getDagAttemptNumber());
+ }
+ if (other.hasVertexName()) {
+ bitField0_ |= 0x00000008;
+ vertexName_ = other.vertexName_;
+ onChanged();
+ }
+ if (other.hasFragmentNumber()) {
+ setFragmentNumber(other.getFragmentNumber());
+ }
+ if (other.hasAttemptNumber()) {
+ setAttemptNumber(other.getAttemptNumber());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ private int bitField0_;
+
+ // optional string query_id = 1;
+ private java.lang.Object queryId_ = "";
+ /**
+ * optional string query_id = 1;
+ */
+ public boolean hasQueryId() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * optional string query_id = 1;
+ */
+ public java.lang.String getQueryId() {
+ java.lang.Object ref = queryId_;
+ if (!(ref instanceof java.lang.String)) {
+ java.lang.String s = ((com.google.protobuf.ByteString) ref)
+ .toStringUtf8();
+ queryId_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * optional string query_id = 1;
+ */
+ public com.google.protobuf.ByteString
+ getQueryIdBytes() {
+ java.lang.Object ref = queryId_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ queryId_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * optional string query_id = 1;
+ */
+ public Builder setQueryId(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ queryId_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * optional string query_id = 1;
+ */
+ public Builder clearQueryId() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ queryId_ = getDefaultInstance().getQueryId();
+ onChanged();
+ return this;
+ }
+ /**
+ * optional string query_id = 1;
+ */
+ public Builder setQueryIdBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ queryId_ = value;
+ onChanged();
+ return this;
+ }
+
+ // optional string dag_name = 2;
+ private java.lang.Object dagName_ = "";
+ /**
+ * optional string dag_name = 2;
+ */
+ public boolean hasDagName() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * optional string dag_name = 2;
+ */
+ public java.lang.String getDagName() {
+ java.lang.Object ref = dagName_;
+ if (!(ref instanceof java.lang.String)) {
+ java.lang.String s = ((com.google.protobuf.ByteString) ref)
+ .toStringUtf8();
+ dagName_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * optional string dag_name = 2;
+ */
+ public com.google.protobuf.ByteString
+ getDagNameBytes() {
+ java.lang.Object ref = dagName_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ dagName_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * optional string dag_name = 2;
+ */
+ public Builder setDagName(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000002;
+ dagName_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * optional string dag_name = 2;
+ */
+ public Builder clearDagName() {
+ bitField0_ = (bitField0_ & ~0x00000002);
+ dagName_ = getDefaultInstance().getDagName();
+ onChanged();
+ return this;
+ }
+ /**
+ * optional string dag_name = 2;
+ */
+ public Builder setDagNameBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000002;
+ dagName_ = value;
+ onChanged();
+ return this;
+ }
+
+ // optional int32 dag_attempt_number = 3;
+ private int dagAttemptNumber_ ;
+ /**
+ * optional int32 dag_attempt_number = 3;
+ */
+ public boolean hasDagAttemptNumber() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ /**
+ * optional int32 dag_attempt_number = 3;
+ */
+ public int getDagAttemptNumber() {
+ return dagAttemptNumber_;
+ }
+ /**
+ * optional int32 dag_attempt_number = 3;
+ */
+ public Builder setDagAttemptNumber(int value) {
+ bitField0_ |= 0x00000004;
+ dagAttemptNumber_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * optional int32 dag_attempt_number = 3;
+ */
+ public Builder clearDagAttemptNumber() {
+ bitField0_ = (bitField0_ & ~0x00000004);
+ dagAttemptNumber_ = 0;
+ onChanged();
+ return this;
+ }
+
+ // optional string vertex_name = 4;
+ private java.lang.Object vertexName_ = "";
+ /**
+ * optional string vertex_name = 4;
+ */
+ public boolean hasVertexName() {
+ return ((bitField0_ & 0x00000008) == 0x00000008);
+ }
+ /**
+ * optional string vertex_name = 4;
+ */
+ public java.lang.String getVertexName() {
+ java.lang.Object ref = vertexName_;
+ if (!(ref instanceof java.lang.String)) {
+ java.lang.String s = ((com.google.protobuf.ByteString) ref)
+ .toStringUtf8();
+ vertexName_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * optional string vertex_name = 4;
+ */
+ public com.google.protobuf.ByteString
+ getVertexNameBytes() {
+ java.lang.Object ref = vertexName_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ vertexName_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * optional string vertex_name = 4;
+ */
+ public Builder setVertexName(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000008;
+ vertexName_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * optional string vertex_name = 4;
+ */
+ public Builder clearVertexName() {
+ bitField0_ = (bitField0_ & ~0x00000008);
+ vertexName_ = getDefaultInstance().getVertexName();
+ onChanged();
+ return this;
+ }
+ /**
+ * optional string vertex_name = 4;
+ */
+ public Builder setVertexNameBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000008;
+ vertexName_ = value;
+ onChanged();
+ return this;
+ }
+
+ // optional int32 fragment_number = 5;
+ private int fragmentNumber_ ;
+ /**
+ * optional int32 fragment_number = 5;
+ */
+ public boolean hasFragmentNumber() {
+ return ((bitField0_ & 0x00000010) == 0x00000010);
+ }
+ /**
+ * optional int32 fragment_number = 5;
+ */
+ public int getFragmentNumber() {
+ return fragmentNumber_;
+ }
+ /**
+ * optional int32 fragment_number = 5;
+ */
+ public Builder setFragmentNumber(int value) {
+ bitField0_ |= 0x00000010;
+ fragmentNumber_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * optional int32 fragment_number = 5;
+ */
+ public Builder clearFragmentNumber() {
+ bitField0_ = (bitField0_ & ~0x00000010);
+ fragmentNumber_ = 0;
+ onChanged();
+ return this;
+ }
+
+ // optional int32 attempt_number = 6;
+ private int attemptNumber_ ;
+ /**
+ * optional int32 attempt_number = 6;
+ */
+ public boolean hasAttemptNumber() {
+ return ((bitField0_ & 0x00000020) == 0x00000020);
+ }
+ /**
+ * optional int32 attempt_number = 6;
+ */
+ public int getAttemptNumber() {
+ return attemptNumber_;
+ }
+ /**
+ * optional int32 attempt_number = 6;
+ */
+ public Builder setAttemptNumber(int value) {
+ bitField0_ |= 0x00000020;
+ attemptNumber_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * optional int32 attempt_number = 6;
+ */
+ public Builder clearAttemptNumber() {
+ bitField0_ = (bitField0_ & ~0x00000020);
+ attemptNumber_ = 0;
+ onChanged();
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:TerminateFragmentRequestProto)
+ }
+
+ static {
+ defaultInstance = new TerminateFragmentRequestProto(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:TerminateFragmentRequestProto)
+ }
+
+ public interface TerminateFragmentResponseProtoOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+ }
+ /**
+ * Protobuf type {@code TerminateFragmentResponseProto}
+ */
+ public static final class TerminateFragmentResponseProto extends
+ com.google.protobuf.GeneratedMessage
+ implements TerminateFragmentResponseProtoOrBuilder {
+ // Use TerminateFragmentResponseProto.newBuilder() to construct.
+    private TerminateFragmentResponseProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private TerminateFragmentResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final TerminateFragmentResponseProto defaultInstance;
+ public static TerminateFragmentResponseProto getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public TerminateFragmentResponseProto getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private TerminateFragmentResponseProto(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_TerminateFragmentResponseProto_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_TerminateFragmentResponseProto_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentResponseProto.class, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentResponseProto.Builder.class);
+ }
+
+    public static com.google.protobuf.Parser<TerminateFragmentResponseProto> PARSER =
+        new com.google.protobuf.AbstractParser<TerminateFragmentResponseProto>() {
+ public TerminateFragmentResponseProto parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new TerminateFragmentResponseProto(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+    public com.google.protobuf.Parser<TerminateFragmentResponseProto> getParserForType() {
+ return PARSER;
+ }
+
+ private void initFields() {
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentResponseProto)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentResponseProto other = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentResponseProto) obj;
+
+ boolean result = true;
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ private int memoizedHashCode = 0;
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentResponseProto parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentResponseProto parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentResponseProto parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentResponseProto parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentResponseProto parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentResponseProto parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentResponseProto parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentResponseProto parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentResponseProto parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentResponseProto parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentResponseProto prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code TerminateFragmentResponseProto}
+ */
+ public static final class Builder extends
+        com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentResponseProtoOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_TerminateFragmentResponseProto_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_TerminateFragmentResponseProto_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentResponseProto.class, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentResponseProto.Builder.class);
+ }
+
+ // Construct using org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentResponseProto.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_TerminateFragmentResponseProto_descriptor;
+ }
+
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentResponseProto getDefaultInstanceForType() {
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentResponseProto.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentResponseProto build() {
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentResponseProto result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentResponseProto buildPartial() {
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentResponseProto result = new org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentResponseProto(this);
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentResponseProto) {
+ return mergeFrom((org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentResponseProto)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentResponseProto other) {
+ if (other == org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentResponseProto.getDefaultInstance()) return this;
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentResponseProto parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentResponseProto) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:TerminateFragmentResponseProto)
+ }
+
+ static {
+ defaultInstance = new TerminateFragmentResponseProto(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:TerminateFragmentResponseProto)
+ }
+
/**
* Protobuf service {@code LlapDaemonProtocol}
*/
@@ -9646,6 +12195,22 @@ public abstract void sourceStateUpdated(
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedRequestProto request,
        com.google.protobuf.RpcCallback<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedResponseProto> done);
+ /**
+ * rpc queryComplete(.QueryCompleteRequestProto) returns (.QueryCompleteResponseProto);
+ */
+ public abstract void queryComplete(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteRequestProto request,
+        com.google.protobuf.RpcCallback<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteResponseProto> done);
+
+ /**
+ * rpc terminateFragment(.TerminateFragmentRequestProto) returns (.TerminateFragmentResponseProto);
+ */
+ public abstract void terminateFragment(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto request,
+        com.google.protobuf.RpcCallback<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentResponseProto> done);
+
}
public static com.google.protobuf.Service newReflectiveService(
@@ -9667,6 +12232,22 @@ public void sourceStateUpdated(
impl.sourceStateUpdated(controller, request, done);
}
+ @java.lang.Override
+ public void queryComplete(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteRequestProto request,
+          com.google.protobuf.RpcCallback<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteResponseProto> done) {
+ impl.queryComplete(controller, request, done);
+ }
+
+ @java.lang.Override
+ public void terminateFragment(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto request,
+          com.google.protobuf.RpcCallback<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentResponseProto> done) {
+ impl.terminateFragment(controller, request, done);
+ }
+
};
}
@@ -9693,6 +12274,10 @@ public void sourceStateUpdated(
return impl.submitWork(controller, (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto)request);
case 1:
return impl.sourceStateUpdated(controller, (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedRequestProto)request);
+ case 2:
+ return impl.queryComplete(controller, (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteRequestProto)request);
+ case 3:
+ return impl.terminateFragment(controller, (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto)request);
default:
throw new java.lang.AssertionError("Can't get here.");
}
@@ -9711,6 +12296,10 @@ public void sourceStateUpdated(
return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto.getDefaultInstance();
case 1:
return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedRequestProto.getDefaultInstance();
+ case 2:
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteRequestProto.getDefaultInstance();
+ case 3:
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
}
@@ -9729,6 +12318,10 @@ public void sourceStateUpdated(
return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkResponseProto.getDefaultInstance();
case 1:
return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedResponseProto.getDefaultInstance();
+ case 2:
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteResponseProto.getDefaultInstance();
+ case 3:
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentResponseProto.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
}
@@ -9753,6 +12346,22 @@ public abstract void sourceStateUpdated(
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedRequestProto request,
        com.google.protobuf.RpcCallback<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedResponseProto> done);
+ /**
+ * rpc queryComplete(.QueryCompleteRequestProto) returns (.QueryCompleteResponseProto);
+ */
+ public abstract void queryComplete(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteRequestProto request,
+        com.google.protobuf.RpcCallback<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteResponseProto> done);
+
+ /**
+ * rpc terminateFragment(.TerminateFragmentRequestProto) returns (.TerminateFragmentResponseProto);
+ */
+ public abstract void terminateFragment(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto request,
+        com.google.protobuf.RpcCallback<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentResponseProto> done);
+
public static final
com.google.protobuf.Descriptors.ServiceDescriptor
getDescriptor() {
@@ -9785,6 +12394,16 @@ public final void callMethod(
            com.google.protobuf.RpcUtil.<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedResponseProto>specializeCallback(
done));
return;
+ case 2:
+ this.queryComplete(controller, (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteRequestProto)request,
+            com.google.protobuf.RpcUtil.<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteResponseProto>specializeCallback(
+ done));
+ return;
+ case 3:
+ this.terminateFragment(controller, (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto)request,
+            com.google.protobuf.RpcUtil.<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentResponseProto>specializeCallback(
+ done));
+ return;
default:
throw new java.lang.AssertionError("Can't get here.");
}
@@ -9803,6 +12422,10 @@ public final void callMethod(
return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto.getDefaultInstance();
case 1:
return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedRequestProto.getDefaultInstance();
+ case 2:
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteRequestProto.getDefaultInstance();
+ case 3:
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
}
@@ -9821,6 +12444,10 @@ public final void callMethod(
return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkResponseProto.getDefaultInstance();
case 1:
return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedResponseProto.getDefaultInstance();
+ case 2:
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteResponseProto.getDefaultInstance();
+ case 3:
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentResponseProto.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
}
@@ -9871,6 +12498,36 @@ public void sourceStateUpdated(
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedResponseProto.class,
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedResponseProto.getDefaultInstance()));
}
+
+ public void queryComplete(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteRequestProto request,
+        com.google.protobuf.RpcCallback<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteResponseProto> done) {
+ channel.callMethod(
+ getDescriptor().getMethods().get(2),
+ controller,
+ request,
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteResponseProto.getDefaultInstance(),
+ com.google.protobuf.RpcUtil.generalizeCallback(
+ done,
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteResponseProto.class,
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteResponseProto.getDefaultInstance()));
+ }
+
+ public void terminateFragment(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto request,
+        com.google.protobuf.RpcCallback<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentResponseProto> done) {
+ channel.callMethod(
+ getDescriptor().getMethods().get(3),
+ controller,
+ request,
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentResponseProto.getDefaultInstance(),
+ com.google.protobuf.RpcUtil.generalizeCallback(
+ done,
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentResponseProto.class,
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentResponseProto.getDefaultInstance()));
+ }
}
public static BlockingInterface newBlockingStub(
@@ -9888,6 +12545,16 @@ public static BlockingInterface newBlockingStub(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedRequestProto request)
throws com.google.protobuf.ServiceException;
+
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteResponseProto queryComplete(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteRequestProto request)
+ throws com.google.protobuf.ServiceException;
+
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentResponseProto terminateFragment(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto request)
+ throws com.google.protobuf.ServiceException;
}
private static final class BlockingStub implements BlockingInterface {
@@ -9920,6 +12587,30 @@ private BlockingStub(com.google.protobuf.BlockingRpcChannel channel) {
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedResponseProto.getDefaultInstance());
}
+
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteResponseProto queryComplete(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteRequestProto request)
+ throws com.google.protobuf.ServiceException {
+ return (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteResponseProto) channel.callBlockingMethod(
+ getDescriptor().getMethods().get(2),
+ controller,
+ request,
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteResponseProto.getDefaultInstance());
+ }
+
+
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentResponseProto terminateFragment(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto request)
+ throws com.google.protobuf.ServiceException {
+ return (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentResponseProto) channel.callBlockingMethod(
+ getDescriptor().getMethods().get(3),
+ controller,
+ request,
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentResponseProto.getDefaultInstance());
+ }
+
}
// @@protoc_insertion_point(class_scope:LlapDaemonProtocol)
@@ -9975,6 +12666,26 @@ private BlockingStub(com.google.protobuf.BlockingRpcChannel channel) {
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_SourceStateUpdatedResponseProto_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_QueryCompleteRequestProto_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_QueryCompleteRequestProto_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_QueryCompleteResponseProto_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_QueryCompleteResponseProto_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_TerminateFragmentRequestProto_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_TerminateFragmentRequestProto_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_TerminateFragmentResponseProto_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_TerminateFragmentResponseProto_fieldAccessorTable;
public static com.google.protobuf.Descriptors.FileDescriptor
getDescriptor() {
@@ -10020,15 +12731,27 @@ private BlockingStub(com.google.protobuf.BlockingRpcChannel channel) {
"\"f\n\036SourceStateUpdatedRequestProto\022\020\n\010da" +
"g_name\030\001 \001(\t\022\020\n\010src_name\030\002 \001(\t\022 \n\005state\030" +
"\003 \001(\0162\021.SourceStateProto\"!\n\037SourceStateU" +
- "pdatedResponseProto*2\n\020SourceStateProto\022" +
- "\017\n\013S_SUCCEEDED\020\001\022\r\n\tS_RUNNING\020\0022\256\001\n\022Llap" +
- "DaemonProtocol\022?\n\nsubmitWork\022\027.SubmitWor" +
- "kRequestProto\032\030.SubmitWorkResponseProto\022",
- "W\n\022sourceStateUpdated\022\037.SourceStateUpdat" +
- "edRequestProto\032 .SourceStateUpdatedRespo" +
- "nseProtoBH\n&org.apache.hadoop.hive.llap." +
- "daemon.rpcB\030LlapDaemonProtocolProtos\210\001\001\240" +
- "\001\001"
+ "pdatedResponseProto\"X\n\031QueryCompleteRequ" +
+ "estProto\022\020\n\010query_id\030\001 \001(\t\022\020\n\010dag_name\030\002" +
+ " \001(\t\022\027\n\014delete_delay\030\003 \001(\003:\0010\"\034\n\032QueryCo" +
+ "mpleteResponseProto\"\245\001\n\035TerminateFragmen",
+ "tRequestProto\022\020\n\010query_id\030\001 \001(\t\022\020\n\010dag_n" +
+ "ame\030\002 \001(\t\022\032\n\022dag_attempt_number\030\003 \001(\005\022\023\n" +
+ "\013vertex_name\030\004 \001(\t\022\027\n\017fragment_number\030\005 " +
+ "\001(\005\022\026\n\016attempt_number\030\006 \001(\005\" \n\036Terminate" +
+ "FragmentResponseProto*2\n\020SourceStateProt" +
+ "o\022\017\n\013S_SUCCEEDED\020\001\022\r\n\tS_RUNNING\020\0022\316\002\n\022Ll" +
+ "apDaemonProtocol\022?\n\nsubmitWork\022\027.SubmitW" +
+ "orkRequestProto\032\030.SubmitWorkResponseProt" +
+ "o\022W\n\022sourceStateUpdated\022\037.SourceStateUpd" +
+ "atedRequestProto\032 .SourceStateUpdatedRes",
+ "ponseProto\022H\n\rqueryComplete\022\032.QueryCompl" +
+ "eteRequestProto\032\033.QueryCompleteResponseP" +
+ "roto\022T\n\021terminateFragment\022\036.TerminateFra" +
+ "gmentRequestProto\032\037.TerminateFragmentRes" +
+ "ponseProtoBH\n&org.apache.hadoop.hive.lla" +
+ "p.daemon.rpcB\030LlapDaemonProtocolProtos\210\001" +
+ "\001\240\001\001"
};
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -10095,6 +12818,30 @@ private BlockingStub(com.google.protobuf.BlockingRpcChannel channel) {
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_SourceStateUpdatedResponseProto_descriptor,
new java.lang.String[] { });
+ internal_static_QueryCompleteRequestProto_descriptor =
+ getDescriptor().getMessageTypes().get(10);
+ internal_static_QueryCompleteRequestProto_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_QueryCompleteRequestProto_descriptor,
+ new java.lang.String[] { "QueryId", "DagName", "DeleteDelay", });
+ internal_static_QueryCompleteResponseProto_descriptor =
+ getDescriptor().getMessageTypes().get(11);
+ internal_static_QueryCompleteResponseProto_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_QueryCompleteResponseProto_descriptor,
+ new java.lang.String[] { });
+ internal_static_TerminateFragmentRequestProto_descriptor =
+ getDescriptor().getMessageTypes().get(12);
+ internal_static_TerminateFragmentRequestProto_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_TerminateFragmentRequestProto_descriptor,
+ new java.lang.String[] { "QueryId", "DagName", "DagAttemptNumber", "VertexName", "FragmentNumber", "AttemptNumber", });
+ internal_static_TerminateFragmentResponseProto_descriptor =
+ getDescriptor().getMessageTypes().get(13);
+ internal_static_TerminateFragmentResponseProto_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_TerminateFragmentResponseProto_descriptor,
+ new java.lang.String[] { });
return null;
}
};
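
Before the hand-written daemon changes below, a minimal sketch of how the new blocking RPC declared above might be driven from the AM side. It assumes a LlapDaemonProtocolBlockingPB proxy such as the one LlapDaemonProtocolClientImpl#getProxy() returns; the method name, queryId and dagName are placeholders, not part of this patch:

    // Illustrative only: notify a daemon that a query finished so it can clean up.
    void notifyDaemonOfQueryCompletion(LlapDaemonProtocolBlockingPB proxy, String queryId,
        String dagName) throws com.google.protobuf.ServiceException {
      LlapDaemonProtocolProtos.QueryCompleteRequestProto request =
          LlapDaemonProtocolProtos.QueryCompleteRequestProto.newBuilder()
              .setQueryId(queryId)
              .setDagName(dagName)
              .setDeleteDelay(300L) // defer file cleanup; presumably seconds, in line with llap.file.cleanup.delay-seconds
              .build();
      proxy.queryComplete(null, request); // null RpcController, as in LlapDaemonProtocolClientImpl
    }

terminateFragment follows the same pattern, using the TerminateFragmentRequestProto builder generated above.
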
diff --git llap-server/src/java/org/apache/hadoop/hive/llap/LlapNodeId.java llap-server/src/java/org/apache/hadoop/hive/llap/LlapNodeId.java
index ca04557..515bf3c 100644
--- llap-server/src/java/org/apache/hadoop/hive/llap/LlapNodeId.java
+++ llap-server/src/java/org/apache/hadoop/hive/llap/LlapNodeId.java
@@ -81,9 +81,6 @@ public int hashCode() {
@Override
public String toString() {
- return "LlapNodeId{" +
- "hostname='" + hostname + '\'' +
- ", port=" + port +
- '}';
+ return hostname + ":" + port;
}
}
diff --git llap-server/src/java/org/apache/hadoop/hive/llap/daemon/ContainerRunner.java llap-server/src/java/org/apache/hadoop/hive/llap/daemon/ContainerRunner.java
index 82f3b59..f3ce33b 100644
--- llap-server/src/java/org/apache/hadoop/hive/llap/daemon/ContainerRunner.java
+++ llap-server/src/java/org/apache/hadoop/hive/llap/daemon/ContainerRunner.java
@@ -16,12 +16,19 @@
import java.io.IOException;
+import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos;
+import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteRequestProto;
import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedRequestProto;
import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto;
+import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto;
public interface ContainerRunner {
void submitWork(SubmitWorkRequestProto request) throws IOException;
void sourceStateUpdated(SourceStateUpdatedRequestProto request);
+
+ void queryComplete(QueryCompleteRequestProto request);
+
+ void terminateFragment(TerminateFragmentRequestProto request);
}
diff --git llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/ContainerRunnerImpl.java llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/ContainerRunnerImpl.java
index 061e875..c9e5829 100644
--- llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/ContainerRunnerImpl.java
+++ llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/ContainerRunnerImpl.java
@@ -14,7 +14,6 @@
package org.apache.hadoop.hive.llap.daemon.impl;
-import java.io.File;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.nio.ByteBuffer;
@@ -26,19 +25,18 @@
import java.util.concurrent.atomic.AtomicReference;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.llap.LlapNodeId;
import org.apache.hadoop.hive.llap.daemon.ContainerRunner;
import org.apache.hadoop.hive.llap.daemon.HistoryLogger;
-import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos;
import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentRuntimeInfo;
import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentSpecProto;
import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto;
import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto;
+import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteRequestProto;
import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateProto;
import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedRequestProto;
import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto;
+import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto;
import org.apache.hadoop.hive.llap.metrics.LlapDaemonExecutorMetrics;
import org.apache.hadoop.hive.llap.shufflehandler.ShuffleHandler;
import org.apache.hadoop.io.DataInputBuffer;
@@ -53,6 +51,7 @@
import org.apache.tez.common.security.TokenCache;
import org.apache.tez.dag.api.TezConfiguration;
import org.apache.tez.dag.api.TezConstants;
+import org.apache.tez.dag.records.TezTaskAttemptID;
import org.apache.tez.runtime.api.impl.ExecutionContextImpl;
import com.google.common.base.Preconditions;
@@ -64,11 +63,11 @@
private static final Logger LOG = Logger.getLogger(ContainerRunnerImpl.class);
private volatile AMReporter amReporter;
+ private final QueryTracker queryTracker;
private final Scheduler executorService;
   private final AtomicReference<InetSocketAddress> localAddress;
private final String[] localDirsBase;
   private final Map<String, String> localEnv = new HashMap<>();
- private final FileSystem localFs;
private final long memoryPerExecutor;
private final LlapDaemonExecutorMetrics metrics;
private final Configuration conf;
@@ -89,6 +88,7 @@ public ContainerRunnerImpl(Configuration conf, int numExecutors, int waitQueueSi
this.localDirsBase = localDirsBase;
this.localAddress = localAddress;
+ this.queryTracker = new QueryTracker(conf, localDirsBase);
this.executorService = new TaskExecutorService(numExecutors, waitQueueSize, enablePreemption);
AuxiliaryServiceHelper.setServiceDataIntoEnv(
TezConstants.TEZ_SHUFFLE_HANDLER_SERVICE_ID,
@@ -99,11 +99,6 @@ public ContainerRunnerImpl(Configuration conf, int numExecutors, int waitQueueSi
this.memoryPerExecutor = (long)(totalMemoryAvailableBytes * 0.8 / (float) numExecutors);
this.metrics = metrics;
- try {
- localFs = FileSystem.getLocal(conf);
- } catch (IOException e) {
- throw new RuntimeException("Failed to setup local filesystem instance", e);
- }
confParams = new TaskRunnerCallable.ConfParams(
conf.getInt(TezConfiguration.TEZ_TASK_AM_HEARTBEAT_INTERVAL_MS,
TezConfiguration.TEZ_TASK_AM_HEARTBEAT_INTERVAL_MS_DEFAULT),
@@ -135,19 +130,10 @@ protected void serviceStop() throws Exception {
amReporter.stop();
amReporter = null;
}
+ queryTracker.shutdown();
super.serviceStop();
}
- // TODO Move this into a utilities class
- private static String createAppSpecificLocalDir(String baseDir, String applicationIdString,
- String user) {
- // TODO This is broken for secure clusters. The app will not have permission to create these directories.
- // May work via Slider - since the directory would already exist. Otherwise may need a custom shuffle handler.
- // TODO This should be the process user - and not the user on behalf of whom the query is being submitted.
- return baseDir + File.separator + "usercache" + File.separator + user + File.separator +
- "appcache" + File.separator + applicationIdString;
- }
-
@Override
public void submitWork(SubmitWorkRequestProto request) throws IOException {
HistoryLogger.logFragmentStart(request.getApplicationIdString(), request.getContainerIdString(),
@@ -170,15 +156,20 @@ public void submitWork(SubmitWorkRequestProto request) throws IOException {
env.putAll(localEnv);
env.put(ApplicationConstants.Environment.USER.name(), request.getUser());
- String[] localDirs = new String[localDirsBase.length];
+ FragmentSpecProto fragmentSpec = request.getFragmentSpec();
+ TezTaskAttemptID taskAttemptId = TezTaskAttemptID.fromString(
+ fragmentSpec.getTaskAttemptIdString());
+ int dagIdentifier = taskAttemptId.getTaskID().getVertexID().getDAGId().getId();
+
+ queryTracker
+ .registerFragment(null, request.getApplicationIdString(), fragmentSpec.getDagName(),
+ dagIdentifier,
+ fragmentSpec.getVertexName(), fragmentSpec.getFragmentNumber(),
+ fragmentSpec.getAttemptNumber(), request.getUser());
+
+ String []localDirs = queryTracker.getLocalDirs(null, fragmentSpec.getDagName(), request.getUser());
+ Preconditions.checkNotNull(localDirs);
- // Setup up local dirs to be application specific, and create them.
- for (int i = 0; i < localDirsBase.length; i++) {
- localDirs[i] = createAppSpecificLocalDir(localDirsBase[i], request.getApplicationIdString(),
- request.getUser());
- localFs.mkdirs(new Path(localDirs[i]));
- }
- // TODO Avoid this directory creation on each work-unit submission.
if (LOG.isDebugEnabled()) {
LOG.debug("Dirs are: " + Arrays.toString(localDirs));
}
@@ -195,7 +186,9 @@ public void submitWork(SubmitWorkRequestProto request) throws IOException {
// TODO Unregistering does not happen at the moment, since there's no signals on when an app completes.
LOG.info("DEBUG: Registering request with the ShuffleHandler");
- ShuffleHandler.get().registerApplication(request.getApplicationIdString(), jobToken, request.getUser(), localDirs);
+ ShuffleHandler.get()
+ .registerDag(request.getApplicationIdString(), dagIdentifier, jobToken,
+ request.getUser(), localDirs);
     ConcurrentMap<String, SourceStateProto> sourceCompletionMap = getSourceCompletionMap(request.getFragmentSpec().getDagName());
TaskRunnerCallable callable = new TaskRunnerCallable(request, new Configuration(getConfig()),
@@ -209,10 +202,6 @@ public void submitWork(SubmitWorkRequestProto request) throws IOException {
}
}
- private void notifyAMOfRejection(TaskRunnerCallable callable) {
- LOG.error("Notifying AM of request rejection is not implemented yet!");
- }
-
@Override
public void sourceStateUpdated(SourceStateUpdatedRequestProto request) {
LOG.info("Processing state update: " + stringifySourceStateUpdateRequest(request));
@@ -220,6 +209,21 @@ public void sourceStateUpdated(SourceStateUpdatedRequestProto request) {
dagMap.put(request.getSrcName(), request.getState());
}
+ @Override
+ public void queryComplete(QueryCompleteRequestProto request) {
+ queryTracker.queryComplete(null, request.getDagName(), request.getDeleteDelay());
+ }
+
+ @Override
+ public void terminateFragment(TerminateFragmentRequestProto request) {
+ // TODO Implement when this gets used.
+ }
+
+
+ private void notifyAMOfRejection(TaskRunnerCallable callable) {
+ LOG.error("Notifying AM of request rejection is not implemented yet!");
+ }
+
private String stringifySourceStateUpdateRequest(SourceStateUpdatedRequestProto request) {
StringBuilder sb = new StringBuilder();
sb.append("dagName=").append(request.getDagName())
diff --git llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemon.java llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemon.java
index 86b1f5c..fabacf7 100644
--- llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemon.java
+++ llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemon.java
@@ -27,7 +27,10 @@
import org.apache.hadoop.hive.llap.configuration.LlapConfiguration;
import org.apache.hadoop.hive.llap.daemon.ContainerRunner;
import org.apache.hadoop.hive.llap.daemon.registry.impl.LlapRegistryService;
-import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos;
+import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteRequestProto;
+import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedRequestProto;
+import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto;
+import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto;
import org.apache.hadoop.hive.llap.io.api.LlapIoProxy;
import org.apache.hadoop.hive.llap.metrics.LlapDaemonExecutorMetrics;
import org.apache.hadoop.hive.llap.metrics.LlapMetricsSystem;
@@ -252,17 +255,27 @@ public static void main(String[] args) throws Exception {
}
@Override
- public void submitWork(LlapDaemonProtocolProtos.SubmitWorkRequestProto request) throws
+ public void submitWork(SubmitWorkRequestProto request) throws
IOException {
numSubmissions.incrementAndGet();
containerRunner.submitWork(request);
}
@Override
- public void sourceStateUpdated(LlapDaemonProtocolProtos.SourceStateUpdatedRequestProto request) {
+ public void sourceStateUpdated(SourceStateUpdatedRequestProto request) {
containerRunner.sourceStateUpdated(request);
}
+ @Override
+ public void queryComplete(QueryCompleteRequestProto request) {
+ containerRunner.queryComplete(request);
+ }
+
+ @Override
+ public void terminateFragment(TerminateFragmentRequestProto request) {
+ containerRunner.terminateFragment(request);
+ }
+
@VisibleForTesting
public long getNumSubmissions() {
return numSubmissions.get();
diff --git llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemonProtocolClientImpl.java llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemonProtocolClientImpl.java
index 01b53c2..9f161fe 100644
--- llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemonProtocolClientImpl.java
+++ llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemonProtocolClientImpl.java
@@ -20,10 +20,15 @@
import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos;
+import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteRequestProto;
+import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteResponseProto;
import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedRequestProto;
import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedResponseProto;
import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto;
import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkResponseProto;
+import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto;
+import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentResponseProto;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.net.NetUtils;
@@ -66,6 +71,28 @@ public SourceStateUpdatedResponseProto sourceStateUpdated(RpcController controll
}
}
+ @Override
+ public QueryCompleteResponseProto queryComplete(RpcController controller,
+ QueryCompleteRequestProto request) throws
+ ServiceException {
+ try {
+ return getProxy().queryComplete(null, request);
+ } catch (IOException e) {
+ throw new ServiceException(e);
+ }
+ }
+
+ @Override
+ public TerminateFragmentResponseProto terminateFragment(
+ RpcController controller,
+ TerminateFragmentRequestProto request) throws ServiceException {
+ try {
+ return getProxy().terminateFragment(null, request);
+ } catch (IOException e) {
+ throw new ServiceException(e);
+ }
+ }
+
public LlapDaemonProtocolBlockingPB getProxy() throws IOException {
if (proxy == null) {
proxy = createProxy();
diff --git llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemonProtocolServerImpl.java llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemonProtocolServerImpl.java
index 0360a27..8cb9715 100644
--- llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemonProtocolServerImpl.java
+++ llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemonProtocolServerImpl.java
@@ -27,10 +27,14 @@
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos;
+import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteRequestProto;
+import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteResponseProto;
import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedRequestProto;
import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedResponseProto;
import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto;
import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkResponseProto;
+import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto;
+import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentResponseProto;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.net.NetUtils;
@@ -84,6 +88,22 @@ public SourceStateUpdatedResponseProto sourceStateUpdated(RpcController controll
}
@Override
+ public QueryCompleteResponseProto queryComplete(RpcController controller,
+ QueryCompleteRequestProto request) throws
+ ServiceException {
+ containerRunner.queryComplete(request);
+ return QueryCompleteResponseProto.getDefaultInstance();
+ }
+
+ @Override
+ public TerminateFragmentResponseProto terminateFragment(
+ RpcController controller,
+ TerminateFragmentRequestProto request) throws ServiceException {
+ containerRunner.terminateFragment(request);
+ return TerminateFragmentResponseProto.getDefaultInstance();
+ }
+
+ @Override
public void serviceStart() {
Configuration conf = getConfig();
diff --git llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/QueryFileCleaner.java llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/QueryFileCleaner.java
new file mode 100644
index 0000000..bc18a77
--- /dev/null
+++ llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/QueryFileCleaner.java
@@ -0,0 +1,94 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.llap.daemon.impl;
+
+import java.io.IOException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.ScheduledFuture;
+import java.util.concurrent.TimeUnit;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import com.google.common.util.concurrent.ListeningExecutorService;
+import com.google.common.util.concurrent.ListeningScheduledExecutorService;
+import com.google.common.util.concurrent.MoreExecutors;
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.llap.configuration.LlapConfiguration;
+import org.apache.hadoop.service.AbstractService;
+import org.apache.tez.common.CallableWithNdc;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
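+/**
+ * Deletes per-query local directories on the daemon after a configurable delay,
+ * using a small scheduled cleaner thread pool.
+ */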
+public class QueryFileCleaner extends AbstractService {
+
+ private static final Logger LOG = LoggerFactory.getLogger(QueryFileCleaner.class);
+
+ private final ListeningScheduledExecutorService executorService;
+ private final FileSystem localFs;
+
+
+ public QueryFileCleaner(Configuration conf, FileSystem localFs) {
+ super(QueryFileCleaner.class.getName());
+ int numCleanerThreads = conf.getInt(LlapConfiguration.LLAP_DAEMON_NUM_FILE_CLEANER_THREADS,
+ LlapConfiguration.LLAP_DAEMON_NUM_FILE_CLEANER_THREADS_DEFAULT);
+ ScheduledExecutorService rawExecutor = Executors.newScheduledThreadPool(numCleanerThreads,
+ new ThreadFactoryBuilder().setDaemon(true).setNameFormat("QueryFileCleaner %d").build());
+ this.executorService = MoreExecutors.listeningDecorator(rawExecutor);
+ this.localFs = localFs;
+ }
+
+ @Override
+ public void serviceStart() {
+ LOG.info(getName() + " started");
+ }
+
+ @Override
+ public void serviceStop() {
+ executorService.shutdownNow();
+ LOG.info(getName() + " stopped");
+ }
+
+ public void cleanupDir(String dir, long deleteDelay) {
+ LOG.info("Scheduling deletion of {} after {} seconds", dir, deleteDelay);
+ executorService.schedule(new FileCleanerCallable(dir), deleteDelay, TimeUnit.SECONDS);
+ }
+
+ private class FileCleanerCallable extends CallableWithNdc<Void> {
+
+ private final String dirToDelete;
+
+ private FileCleanerCallable(String dirToDelete) {
+ this.dirToDelete = dirToDelete;
+ }
+
+ @Override
+ protected Void callInternal() {
+ Path pathToDelete = new Path(dirToDelete);
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Deleting path: " + pathToDelete);
+ }
+ try {
+ localFs.delete(pathToDelete, true);
+ } catch (IOException e) {
+ LOG.warn("Ignoring exception while cleaning up path: " + pathToDelete, e);
+ }
+ return null;
+ }
+ }
+}
diff --git llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/QueryTracker.java llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/QueryTracker.java
new file mode 100644
index 0000000..16d745b
--- /dev/null
+++ llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/QueryTracker.java
@@ -0,0 +1,143 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.llap.daemon.impl;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.llap.shufflehandler.ShuffleHandler;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.concurrent.ConcurrentHashMap;
+
+/**
+ * Tracks queries running within a daemon
+ */
+public class QueryTracker {
+
+ private static final Logger LOG = LoggerFactory.getLogger(QueryTracker.class);
+ private final QueryFileCleaner queryFileCleaner;
+
+ // TODO Make use of the query id for caching when this is available.
+ private final ConcurrentHashMap<String, QueryInfo> queryInfoMap = new ConcurrentHashMap<>();
+
+ private final String[] localDirsBase;
+ private final FileSystem localFs;
+
+ public QueryTracker(Configuration conf, String[] localDirsBase) {
+ this.localDirsBase = localDirsBase;
+ try {
+ localFs = FileSystem.getLocal(conf);
+ } catch (IOException e) {
+ throw new RuntimeException("Failed to setup local filesystem instance", e);
+ }
+ queryFileCleaner = new QueryFileCleaner(conf, localFs);
+ queryFileCleaner.init(conf);
+ queryFileCleaner.start();
+ }
+
+
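+ // Registers an incoming fragment for the given dag, creating the per-query bookkeeping entry on first use.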
+ void registerFragment(String queryId, String appIdString, String dagName, int dagIdentifier,
+ String vertexName, int fragmentNumber, int attemptNumber,
+ String user) throws
+ IOException {
+ QueryInfo queryInfo = queryInfoMap.get(dagName);
+ if (queryInfo == null) {
+ queryInfo = new QueryInfo(queryId, appIdString, dagName, dagIdentifier, user);
+ queryInfoMap.putIfAbsent(dagName, queryInfo);
+ }
+ }
+
+ String[] getLocalDirs(String queryId, String dagName, String user) throws IOException {
+ QueryInfo queryInfo = queryInfoMap.get(dagName);
+ return queryInfo.getLocalDirs();
+ }
+
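+ // Invoked when the AM reports the query as complete: schedules deletion of the query's local dirs
+ // and unregisters the dag from the shuffle handler.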
+ void queryComplete(String queryId, String dagName, long deleteDelay) {
+ LOG.info("Processing queryComplete for dagName={} with deleteDelay={} seconds", dagName, deleteDelay);
+ QueryInfo queryInfo = queryInfoMap.remove(dagName);
+ if (queryInfo == null) {
+ LOG.warn("Ignoring query complete for unknown dag: {}", dagName);
+ return;
+ }
+ String[] localDirs = queryInfo.getLocalDirsNoCreate();
+ if (localDirs != null) {
+ for (String localDir : localDirs) {
+ queryFileCleaner.cleanupDir(localDir, deleteDelay);
+ ShuffleHandler.get().unregisterDag(localDir, dagName, queryInfo.dagIdentifier);
+ }
+ }
+ // TODO HIVE-10535 Cleanup map join cache
+ }
+
+ void shutdown() {
+ queryFileCleaner.stop();
+ }
+
+
+ private class QueryInfo {
+
+ private final String queryId;
+ private final String appIdString;
+ private final String dagName;
+ private final int dagIdentifier;
+ private final String user;
+ private String[] localDirs;
+
+ public QueryInfo(String queryId, String appIdString, String dagName, int dagIdentifier,
+ String user) {
+ this.queryId = queryId;
+ this.appIdString = appIdString;
+ this.dagName = dagName;
+ this.dagIdentifier = dagIdentifier;
+ this.user = user;
+ }
+
+ private synchronized void createLocalDirs() throws IOException {
+ if (localDirs == null) {
+ localDirs = new String[localDirsBase.length];
+ for (int i = 0; i < localDirsBase.length; i++) {
+ localDirs[i] = createAppSpecificLocalDir(localDirsBase[i], appIdString, user, dagIdentifier);
+ localFs.mkdirs(new Path(localDirs[i]));
+ }
+ }
+ }
+
+ private synchronized String[] getLocalDirs() throws IOException {
+ if (localDirs == null) {
+ createLocalDirs();
+ }
+ return localDirs;
+ }
+
+ private synchronized String[] getLocalDirsNoCreate() {
+ return this.localDirs;
+ }
+ }
+
+ private static String createAppSpecificLocalDir(String baseDir, String applicationIdString,
+ String user, int dagIdentifier) {
+ // TODO This is broken for secure clusters. The app will not have permission to create these directories.
+ // May work via Slider - since the directory would already exist. Otherwise may need a custom shuffle handler.
+ // TODO This should be the process user - and not the user on behalf of whom the query is being submitted.
+ return baseDir + File.separator + "usercache" + File.separator + user + File.separator +
+ "appcache" + File.separator + applicationIdString + File.separator + dagIdentifier;
+ }
+}
diff --git llap-server/src/java/org/apache/hadoop/hive/llap/shufflehandler/DirWatcher.java llap-server/src/java/org/apache/hadoop/hive/llap/shufflehandler/DirWatcher.java
index 08e4787..b1d2cf7 100644
--- llap-server/src/java/org/apache/hadoop/hive/llap/shufflehandler/DirWatcher.java
+++ llap-server/src/java/org/apache/hadoop/hive/llap/shufflehandler/DirWatcher.java
@@ -50,7 +50,7 @@
private static final Log LOG = LogFactory.getLog(DirWatcher.class);
private static enum Type {
- BASE, // App Base Dir
+ BASE, // App Base Dir / ${dagDir}
OUTPUT, // appBase/output/
FINAL, // appBase/output/attemptDir
}
@@ -95,9 +95,12 @@
* @param expiry when to expire the watch - in ms
* @throws IOException
*/
- void registerApplicationDir(String pathString, String appId, String user, long expiry) throws IOException {
+ void registerDagDir(String pathString, String appId, int dagIdentifier, String user, long expiry) throws IOException {
+ // The path string already contains the dagIdentifier
Path path = FileSystems.getDefault().getPath(pathString);
- WatchedPathInfo watchedPathInfo = new WatchedPathInfo(System.currentTimeMillis() + expiry, Type.BASE, appId, user);
+ WatchedPathInfo watchedPathInfo =
+ new WatchedPathInfo(System.currentTimeMillis() + expiry, Type.BASE, appId, dagIdentifier,
+ user);
watchedPaths.put(path, watchedPathInfo);
WatchKey watchKey = path.register(watchService, ENTRY_CREATE);
watchedPathInfo.setWatchKey(watchKey);
@@ -106,6 +109,10 @@ void registerApplicationDir(String pathString, String appId, String user, long e
// TODO Watches on the output dirs need to be cancelled at some point. For now - via the expiry.
}
+ void unregisterDagDir(String pathString, String appId, int dagIdentifier) {
+ // TODO Implement removal of all watches for the specified pathString and its sub-tree
+ }
+
/**
* Invoke when a pathIdentifier has been found, or is no longer of interest
* @param pathIdentifier
@@ -226,7 +233,7 @@ public void watch() {
cancelledWatch = true;
watchKey.cancel();
} else {
- LOG.warn("DEBUG: Found unexpected directory: " + event.context() + " under " + watchedPath);
+ LOG.warn("DEBUG: Found unexpected directory while looking for OUTPUT: " + event.context() + " under " + watchedPath);
}
break;
case OUTPUT:
@@ -349,15 +356,17 @@ boolean isComplete() {
final long expiry;
final Type type;
final String appId;
+ final int dagId;
final String user;
final String attemptId;
final AttemptPathIdentifier pathIdentifier;
WatchKey watchKey;
- public WatchedPathInfo(long expiry, Type type, String jobId, String user) {
+ public WatchedPathInfo(long expiry, Type type, String jobId, int dagId, String user) {
this.expiry = expiry;
this.type = type;
this.appId = jobId;
+ this.dagId = dagId;
this.user = user;
this.attemptId = null;
this.pathIdentifier = null;
@@ -367,10 +376,11 @@ public WatchedPathInfo(WatchedPathInfo other, Type type, String attemptId) {
this.expiry = other.expiry;
this.appId = other.appId;
this.user = other.user;
+ this.dagId = other.dagId;
this.type = type;
this.attemptId = attemptId;
if (attemptId != null) {
- pathIdentifier = new AttemptPathIdentifier(appId, user, attemptId);
+ pathIdentifier = new AttemptPathIdentifier(appId, dagId, user, attemptId);
} else {
pathIdentifier = null;
}
diff --git llap-server/src/java/org/apache/hadoop/hive/llap/shufflehandler/ShuffleHandler.java llap-server/src/java/org/apache/hadoop/hive/llap/shufflehandler/ShuffleHandler.java
index d640b36..2572c75 100644
--- llap-server/src/java/org/apache/hadoop/hive/llap/shufflehandler/ShuffleHandler.java
+++ llap-server/src/java/org/apache/hadoop/hive/llap/shufflehandler/ShuffleHandler.java
@@ -408,11 +408,13 @@ public int getPort() {
/**
* Register an application and it's associated credentials and user information.
* @param applicationIdString
+ * @param dagIdentifier
* @param appToken
* @param user
*/
- public void registerApplication(String applicationIdString, Token<JobTokenIdentifier> appToken,
- String user, String [] appDirs) {
+ public void registerDag(String applicationIdString, int dagIdentifier,
+ Token<JobTokenIdentifier> appToken,
+ String user, String[] appDirs) {
// TODO Fix this. There's a race here, where an app may think everything is registered, finish really fast, send events and the consumer will not find the registration.
Boolean registered = registeredApps.putIfAbsent(applicationIdString, Boolean.valueOf(true));
if (registered == null) {
@@ -421,7 +423,8 @@ public void registerApplication(String applicationIdString, Token<JobTokenIdentifier
 final List<String> mapIds = splitMaps(q.get("map"));
 final List<String> reduceQ = q.get("reduce");
 final List<String> jobQ = q.get("job");
+ final List<String> dagIdQ = q.get("dag");
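+ // The shuffle fetch URL must now carry a "dag" parameter so map outputs can be located under the per-dag output directory.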
if (LOG.isDebugEnabled()) {
LOG.debug("RECV: " + request.getUri() +
"\n mapId: " + mapIds +
"\n reduceId: " + reduceQ +
"\n jobId: " + jobQ +
+ "\n dagId: " + dagIdQ +
"\n keepAlive: " + keepAliveParam);
}
- if (mapIds == null || reduceQ == null || jobQ == null) {
+ if (mapIds == null || reduceQ == null || jobQ == null || dagIdQ == null) {
sendError(ctx, "Required param job, map and reduce", BAD_REQUEST);
return;
}
- if (reduceQ.size() != 1 || jobQ.size() != 1) {
+ if (reduceQ.size() != 1 || jobQ.size() != 1 || dagIdQ.size() != 1) {
sendError(ctx, "Too many job/reduce parameters", BAD_REQUEST);
return;
}
int reduceId;
String jobId;
+ int dagId;
try {
reduceId = Integer.parseInt(reduceQ.get(0));
jobId = jobQ.get(0);
+ dagId = Integer.parseInt(dagIdQ.get(0));
} catch (NumberFormatException e) {
sendError(ctx, "Bad reduce parameter", BAD_REQUEST);
return;
@@ -683,7 +694,7 @@ public void messageReceived(ChannelHandlerContext ctx, MessageEvent evt)
String user = userRsrc.get(jobId);
try {
- populateHeaders(mapIds, jobId, user, reduceId,
+ populateHeaders(mapIds, jobId, dagId, user, reduceId,
response, keepAliveParam, mapOutputInfoMap);
} catch(IOException e) {
ch.write(response);
@@ -701,7 +712,7 @@ public void messageReceived(ChannelHandlerContext ctx, MessageEvent evt)
// This will be hit if there's a large number of mapIds in a single request
// (Determined by the cache size further up), in which case we go to disk again.
if (info == null) {
- info = getMapOutputInfo(jobId, mapId, reduceId, user);
+ info = getMapOutputInfo(jobId, dagId, mapId, reduceId, user);
}
lastMap =
sendMapOutput(ctx, ch, user, mapId,
@@ -730,11 +741,11 @@ private String getErrorMessage(Throwable t) {
}
- protected MapOutputInfo getMapOutputInfo(String jobId, String mapId,
+ protected MapOutputInfo getMapOutputInfo(String jobId, int dagId, String mapId,
int reduce, String user) throws IOException {
AttemptPathInfo pathInfo;
try {
- AttemptPathIdentifier identifier = new AttemptPathIdentifier(jobId, user, mapId);
+ AttemptPathIdentifier identifier = new AttemptPathIdentifier(jobId, dagId, user, mapId);
pathInfo = pathCache.get(identifier);
LOG.info("DEBUG: Retrieved pathInfo for " + identifier + " check for corresponding loaded messages to determine whether it was loaded or cached");
} catch (ExecutionException e) {
@@ -758,7 +769,7 @@ protected MapOutputInfo getMapOutputInfo(String jobId, String mapId,
return outputInfo;
}
- protected void populateHeaders(List<String> mapIds, String jobId,
+ protected void populateHeaders(List<String> mapIds, String jobId, int dagId,
 String user, int reduce, HttpResponse response,
 boolean keepAliveParam, Map<String, MapOutputInfo> mapOutputInfoMap)
throws IOException {
@@ -767,7 +778,7 @@ protected void populateHeaders(List mapIds, String jobId,
long contentLength = 0;
for (String mapId : mapIds) {
- MapOutputInfo outputInfo = getMapOutputInfo(jobId, mapId, reduce, user);
+ MapOutputInfo outputInfo = getMapOutputInfo(jobId, dagId, mapId, reduce, user);
// mapOutputInfoMap is used to share the lookups with the caller
if (mapOutputInfoMap.size() < mapOutputMetaInfoCacheSize) {
mapOutputInfoMap.put(mapId, outputInfo);
@@ -952,8 +963,8 @@ public void exceptionCaught(ChannelHandlerContext ctx, ExceptionEvent e)
private static final String USERCACHE_CONSTANT = "usercache";
private static final String APPCACHE_CONSTANT = "appcache";
- private static String getBaseLocation(String jobIdString, String user) {
- // $x/$user/appcache/$appId/output/$mapId
+ private static String getBaseLocation(String jobIdString, int dagId, String user) {
+ // $x/$user/appcache/$appId/${dagId}/output/$mapId
// TODO: Once Shuffle is out of NM, this can use MR APIs to convert
// between App and Job
String parts[] = jobIdString.split("_");
@@ -963,7 +974,9 @@ private static String getBaseLocation(String jobIdString, String user) {
final String baseStr =
USERCACHE_CONSTANT + "/" + user + "/"
+ APPCACHE_CONSTANT + "/"
- + ConverterUtils.toString(appID) + "/output" + "/";
+ + ConverterUtils.toString(appID)
+ + "/" + dagId
+ + "/output" + "/";
return baseStr;
}
@@ -980,11 +993,13 @@ public AttemptPathInfo(Path indexPath, Path dataPath) {
static class AttemptPathIdentifier {
private final String jobId;
+ private final int dagId;
private final String user;
private final String attemptId;
- public AttemptPathIdentifier(String jobId, String user, String attemptId) {
+ public AttemptPathIdentifier(String jobId, int dagId, String user, String attemptId) {
this.jobId = jobId;
+ this.dagId = dagId;
this.user = user;
this.attemptId = attemptId;
}
@@ -1000,19 +1015,20 @@ public boolean equals(Object o) {
AttemptPathIdentifier that = (AttemptPathIdentifier) o;
- if (!attemptId.equals(that.attemptId)) {
+ if (dagId != that.dagId) {
return false;
}
if (!jobId.equals(that.jobId)) {
return false;
}
+ return attemptId.equals(that.attemptId);
- return true;
}
@Override
public int hashCode() {
int result = jobId.hashCode();
+ result = 31 * result + dagId;
result = 31 * result + attemptId.hashCode();
return result;
}
@@ -1020,11 +1036,11 @@ public int hashCode() {
@Override
public String toString() {
return "AttemptPathIdentifier{" +
- "attemptId='" + attemptId + '\'' +
- ", jobId='" + jobId + '\'' +
+ "jobId='" + jobId + '\'' +
+ ", dagId=" + dagId +
+ ", user='" + user + '\'' +
+ ", attemptId='" + attemptId + '\'' +
'}';
}
}
-
-
}
diff --git llap-server/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapTaskCommunicator.java llap-server/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapTaskCommunicator.java
index d35b04a..99459e4 100644
--- llap-server/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapTaskCommunicator.java
+++ llap-server/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapTaskCommunicator.java
@@ -16,7 +16,9 @@
import java.io.IOException;
import java.nio.ByteBuffer;
+import java.util.HashSet;
import java.util.Map;
+import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.RejectedExecutionException;
@@ -26,12 +28,12 @@
import com.google.common.collect.HashBiMap;
import com.google.protobuf.ByteString;
import com.google.protobuf.ServiceException;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.llap.LlapNodeId;
import org.apache.hadoop.hive.llap.configuration.LlapConfiguration;
+import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos;
import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentRuntimeInfo;
+import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteRequestProto;
import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedRequestProto;
import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedResponseProto;
import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto;
@@ -61,10 +63,12 @@
import org.apache.tez.runtime.api.impl.TaskSpec;
import org.apache.tez.runtime.api.impl.TezHeartbeatRequest;
import org.apache.tez.runtime.api.impl.TezHeartbeatResponse;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
public class LlapTaskCommunicator extends TezTaskCommunicatorImpl {
- private static final Log LOG = LogFactory.getLog(LlapTaskCommunicator.class);
+ private static final Logger LOG = LoggerFactory.getLogger(LlapTaskCommunicator.class);
private final SubmitWorkRequestProto BASE_SUBMIT_WORK_REQUEST;
 private final ConcurrentMap<String, ByteBuffer> credentialMap;
@@ -73,8 +77,10 @@
// When DAG specific cleanup happens, it'll be better to link this to a DAG though.
private final EntityTracker entityTracker = new EntityTracker();
private final SourceStateTracker sourceStateTracker;
+ private final Set<LlapNodeId> nodesForQuery = new HashSet<>();
private TaskCommunicator communicator;
+ private long deleteDelayOnDagComplete;
private final LlapTaskUmbilicalProtocol umbilical;
private volatile String currentDagName;
@@ -106,6 +112,11 @@ public void serviceInit(Configuration conf) throws Exception {
int numThreads = conf.getInt(LlapConfiguration.LLAP_DAEMON_COMMUNICATOR_NUM_THREADS,
LlapConfiguration.LLAP_DAEMON_COMMUNICATOR_NUM_THREADS_DEFAULT);
this.communicator = new TaskCommunicator(numThreads);
+ this.deleteDelayOnDagComplete = conf.getLong(LlapConfiguration.LLAP_FILE_CLEANUP_DELAY_SECONDS,
+ LlapConfiguration.LLAP_FILE_CLEANUP_DELAY_SECONDS_DEFAULT);
+ LOG.info("Running LlapTaskCommunicator with "
+ + "fileCleanupDelay=" + deleteDelayOnDagComplete
+ + ", numCommunicatorThreads=" + numThreads);
this.communicator.init(conf);
}
@@ -131,21 +142,23 @@ protected void startRpcServer() {
new JobTokenSecretManager();
jobTokenSecretManager.addTokenForJob(tokenIdentifier, sessionToken);
+ int numHandlers = conf.getInt(TezConfiguration.TEZ_AM_TASK_LISTENER_THREAD_COUNT,
+ TezConfiguration.TEZ_AM_TASK_LISTENER_THREAD_COUNT_DEFAULT);
server = new RPC.Builder(conf)
.setProtocol(LlapTaskUmbilicalProtocol.class)
.setBindAddress("0.0.0.0")
.setPort(0)
.setInstance(umbilical)
- .setNumHandlers(
- conf.getInt(TezConfiguration.TEZ_AM_TASK_LISTENER_THREAD_COUNT,
- TezConfiguration.TEZ_AM_TASK_LISTENER_THREAD_COUNT_DEFAULT))
+ .setNumHandlers(numHandlers)
.setSecretManager(jobTokenSecretManager).build();
// Do serviceACLs need to be refreshed, like in Tez ?
server.start();
this.address = NetUtils.getConnectAddress(server);
- LOG.info("Started LlapUmbilical: " + umbilical.getClass().getName() + " at address: " + address);
+ LOG.info(
+ "Started LlapUmbilical: " + umbilical.getClass().getName() + " at address: " + address +
+ " with numHandlers=" + numHandlers);
} catch (IOException e) {
throw new TezUncheckedException(e);
}
@@ -192,7 +205,9 @@ public void registerRunningTaskAttempt(final ContainerId containerId, final Task
", while trying to launch task: " + taskSpec.getTaskAttemptID());
}
+ LlapNodeId nodeId = LlapNodeId.getInstance(host, port);
entityTracker.registerTaskAttempt(containerId, taskSpec.getTaskAttemptID(), host, port);
+ nodesForQuery.add(nodeId);
sourceStateTracker.registerTaskForStateUpdates(host, port, taskSpec.getInputs());
FragmentRuntimeInfo fragmentRuntimeInfo = sourceStateTracker.getFragmentRuntimeInfo(taskSpec.getDAGName(),
@@ -269,6 +284,29 @@ public void unregisterRunningTaskAttempt(TezTaskAttemptID taskAttemptID) {
}
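+ // Notify every daemon that ran fragments of this dag that the query is complete, so they can
+ // schedule cleanup of the query's local directories and shuffle state.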
@Override
+ public void dagComplete(final String dagName) {
+ QueryCompleteRequestProto request = QueryCompleteRequestProto.newBuilder().setDagName(
+ dagName).setDeleteDelay(deleteDelayOnDagComplete).build();
+ for (final LlapNodeId llapNodeId : nodesForQuery) {
+ LOG.info("Sending dagComplete message for {}, to {}", dagName, llapNodeId);
+ communicator.sendQueryComplete(request, llapNodeId.getHostname(), llapNodeId.getPort(),
+ new TaskCommunicator.ExecuteRequestCallback<LlapDaemonProtocolProtos.QueryCompleteResponseProto>() {
+ @Override
+ public void setResponse(LlapDaemonProtocolProtos.QueryCompleteResponseProto response) {
+ }
+
+ @Override
+ public void indicateError(Throwable t) {
+ LOG.warn("Failed to indicate dag complete dagId={} to node {}", dagName, llapNodeId);
+ }
+ });
+ }
+
+ nodesForQuery.clear();
+ // TODO Ideally move some of the other cleanup code from resetCurrentDag over here
+ }
+
+ @Override
public void onVertexStateUpdated(VertexStateUpdate vertexStateUpdate) {
// Delegate updates over to the source state tracker.
sourceStateTracker
@@ -301,9 +339,9 @@ private void resetCurrentDag(String newDagName) {
// Working on the assumption that a single DAG runs at a time per AM.
currentDagName = newDagName;
sourceStateTracker.resetState(newDagName);
+ nodesForQuery.clear();
LOG.info("CurrentDag set to: " + newDagName);
- // TODO Additional state reset. Potentially sending messages to node to reset.
- // Is it possible for heartbeats to come in from lost tasks - those should be told to die, which
+ // TODO Is it possible for heartbeats to come in from lost tasks - those should be told to die, which
// is likely already happening.
}
diff --git llap-server/src/java/org/apache/hadoop/hive/llap/tezplugins/TaskCommunicator.java llap-server/src/java/org/apache/hadoop/hive/llap/tezplugins/TaskCommunicator.java
index 3b4612d..d536eb2 100644
--- llap-server/src/java/org/apache/hadoop/hive/llap/tezplugins/TaskCommunicator.java
+++ llap-server/src/java/org/apache/hadoop/hive/llap/tezplugins/TaskCommunicator.java
@@ -31,6 +31,8 @@
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hive.llap.daemon.LlapDaemonProtocolBlockingPB;
import org.apache.hadoop.hive.llap.daemon.impl.LlapDaemonProtocolClientImpl;
+import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteRequestProto;
+import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteResponseProto;
import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedRequestProto;
import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedResponseProto;
import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto;
@@ -74,7 +76,8 @@ public void onFailure(Throwable t) {
}
- public void sendSourceStateUpdate(final SourceStateUpdatedRequestProto request, final String host, final int port,
+ public void sendSourceStateUpdate(final SourceStateUpdatedRequestProto request, final String host,
+ final int port,
 final ExecuteRequestCallback<SourceStateUpdatedResponseProto> callback) {
 ListenableFuture<SourceStateUpdatedResponseProto> future =
executor.submit(new SendSourceStateUpdateCallable(host, port, request));
@@ -91,7 +94,26 @@ public void onFailure(Throwable t) {
});
}
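+ // Asynchronously sends a queryComplete request to the daemon at host:port and reports the outcome through the callback.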
- private static abstract class CallableRequest<REQUEST extends Message, RESPONSE extends Message> implements Callable<RESPONSE> {
+ public void sendQueryComplete(final QueryCompleteRequestProto request, final String host,
+ final int port,
+ final ExecuteRequestCallback<QueryCompleteResponseProto> callback) {
+ ListenableFuture<QueryCompleteResponseProto> future =
+ executor.submit(new SendQueryCompleteCallable(host, port, request));
+ Futures.addCallback(future, new FutureCallback<QueryCompleteResponseProto>() {
+ @Override
+ public void onSuccess(QueryCompleteResponseProto result) {
+ callback.setResponse(result);
+ }
+
+ @Override
+ public void onFailure(Throwable t) {
+ callback.indicateError(t);
+ }
+ });
+ }
+
+ private static abstract class CallableRequest<REQUEST extends Message, RESPONSE extends Message>
+ implements Callable<RESPONSE> {
final String hostname;
final int port;
@@ -134,6 +156,20 @@ public SourceStateUpdatedResponseProto call() throws Exception {
}
}
+ private class SendQueryCompleteCallable
+ extends CallableRequest<QueryCompleteRequestProto, QueryCompleteResponseProto> {
+
+ protected SendQueryCompleteCallable(String hostname, int port,
+ QueryCompleteRequestProto queryCompleteRequestProto) {
+ super(hostname, port, queryCompleteRequestProto);
+ }
+
+ @Override
+ public QueryCompleteResponseProto call() throws Exception {
+ return getProxy(hostname, port).queryComplete(null, request);
+ }
+ }
+
 public interface ExecuteRequestCallback<T extends Message> {
void setResponse(T response);
void indicateError(Throwable t);
diff --git llap-server/src/protobuf/LlapDaemonProtocol.proto llap-server/src/protobuf/LlapDaemonProtocol.proto
index f7e6d2b..e098e87 100644
--- llap-server/src/protobuf/LlapDaemonProtocol.proto
+++ llap-server/src/protobuf/LlapDaemonProtocol.proto
@@ -98,7 +98,30 @@ message SourceStateUpdatedRequestProto {
message SourceStateUpdatedResponseProto {
}
+message QueryCompleteRequestProto {
+ optional string query_id = 1;
+ optional string dag_name = 2;
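+  // Time to wait, in seconds, before the daemon deletes the query's local directories.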
+ optional int64 delete_delay = 3 [default = 0];
+}
+
+message QueryCompleteResponseProto {
+}
+
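+// Identifies a single fragment (query, dag, vertex, fragment, attempt) whose execution should be terminated.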
+message TerminateFragmentRequestProto {
+ optional string query_id = 1;
+ optional string dag_name = 2;
+ optional int32 dag_attempt_number = 3;
+ optional string vertex_name = 4;
+ optional int32 fragment_number = 5;
+ optional int32 attempt_number = 6;
+}
+
+message TerminateFragmentResponseProto {
+}
+
service LlapDaemonProtocol {
rpc submitWork(SubmitWorkRequestProto) returns (SubmitWorkResponseProto);
rpc sourceStateUpdated(SourceStateUpdatedRequestProto) returns (SourceStateUpdatedResponseProto);
+ rpc queryComplete(QueryCompleteRequestProto) returns (QueryCompleteResponseProto);
+ rpc terminateFragment(TerminateFragmentRequestProto) returns (TerminateFragmentResponseProto);
}