diff --git llap-server/src/gen/protobuf/gen-java/org/apache/hadoop/hive/llap/daemon/rpc/LlapDaemonProtocolProtos.java llap-server/src/gen/protobuf/gen-java/org/apache/hadoop/hive/llap/daemon/rpc/LlapDaemonProtocolProtos.java
index af009b8..4f63c98 100644
--- llap-server/src/gen/protobuf/gen-java/org/apache/hadoop/hive/llap/daemon/rpc/LlapDaemonProtocolProtos.java
+++ llap-server/src/gen/protobuf/gen-java/org/apache/hadoop/hive/llap/daemon/rpc/LlapDaemonProtocolProtos.java
@@ -1,5 +1,5 @@
// Generated by the protocol buffer compiler. DO NOT EDIT!
-// source: LlapDaemonProtocol.proto
+// source: llap-server/src/protobuf/LlapDaemonProtocol.proto
package org.apache.hadoop.hive.llap.daemon.rpc;
@@ -3154,6 +3154,16 @@ public Builder clearMergedInputDescriptor() {
com.google.protobuf.ByteString
getDagNameBytes();
+ // optional int32 dag_id = 11;
+ /**
+ * optional int32 dag_id = 11;
+ */
+ boolean hasDagId();
+ /**
+ * optional int32 dag_id = 11;
+ */
+ int getDagId();
+
// optional string vertex_name = 3;
/**
* optional string vertex_name = 3;
@@ -3350,13 +3360,13 @@ private FragmentSpecProto(
break;
}
case 26: {
- bitField0_ |= 0x00000004;
+ bitField0_ |= 0x00000008;
vertexName_ = input.readBytes();
break;
}
case 34: {
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.Builder subBuilder = null;
- if (((bitField0_ & 0x00000008) == 0x00000008)) {
+ if (((bitField0_ & 0x00000010) == 0x00000010)) {
subBuilder = processorDescriptor_.toBuilder();
}
processorDescriptor_ = input.readMessage(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.PARSER, extensionRegistry);
@@ -3364,48 +3374,53 @@ private FragmentSpecProto(
subBuilder.mergeFrom(processorDescriptor_);
processorDescriptor_ = subBuilder.buildPartial();
}
- bitField0_ |= 0x00000008;
+ bitField0_ |= 0x00000010;
break;
}
case 42: {
- if (!((mutable_bitField0_ & 0x00000010) == 0x00000010)) {
+ if (!((mutable_bitField0_ & 0x00000020) == 0x00000020)) {
inputSpecs_ = new java.util.ArrayList<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto>();
- mutable_bitField0_ |= 0x00000010;
+ mutable_bitField0_ |= 0x00000020;
}
inputSpecs_.add(input.readMessage(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto.PARSER, extensionRegistry));
break;
}
case 50: {
- if (!((mutable_bitField0_ & 0x00000020) == 0x00000020)) {
+ if (!((mutable_bitField0_ & 0x00000040) == 0x00000040)) {
outputSpecs_ = new java.util.ArrayList<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto>();
- mutable_bitField0_ |= 0x00000020;
+ mutable_bitField0_ |= 0x00000040;
}
outputSpecs_.add(input.readMessage(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto.PARSER, extensionRegistry));
break;
}
case 58: {
- if (!((mutable_bitField0_ & 0x00000040) == 0x00000040)) {
+ if (!((mutable_bitField0_ & 0x00000080) == 0x00000080)) {
groupedInputSpecs_ = new java.util.ArrayList<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto>();
- mutable_bitField0_ |= 0x00000040;
+ mutable_bitField0_ |= 0x00000080;
}
groupedInputSpecs_.add(input.readMessage(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto.PARSER, extensionRegistry));
break;
}
case 64: {
- bitField0_ |= 0x00000010;
+ bitField0_ |= 0x00000020;
vertexParallelism_ = input.readInt32();
break;
}
case 72: {
- bitField0_ |= 0x00000020;
+ bitField0_ |= 0x00000040;
fragmentNumber_ = input.readInt32();
break;
}
case 80: {
- bitField0_ |= 0x00000040;
+ bitField0_ |= 0x00000080;
attemptNumber_ = input.readInt32();
break;
}
+ case 88: {
+ bitField0_ |= 0x00000004;
+ dagId_ = input.readInt32();
+ break;
+ }
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
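For reference, the new case value follows directly from the protobuf wire format: a field's tag is its number shifted left three bits, OR'd with the wire type (0 for varints, 2 for length-delimited data), so the varint field dag_id = 11 parses under (11 << 3) | 0 = 88. A standalone sketch of that arithmetic, assuming nothing beyond the wire-format rule:

final class WireTagSketch {
  // tag = (field_number << 3) | wire_type
  static int makeTag(int fieldNumber, int wireType) {
    return (fieldNumber << 3) | wireType;
  }
  public static void main(String[] args) {
    System.out.println(makeTag(11, 0)); // 88: the new dag_id case above
    System.out.println(makeTag(3, 2));  // 26: the vertex_name case (field 3, length-delimited)
  }
}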
@@ -3414,13 +3429,13 @@ private FragmentSpecProto(
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
- if (((mutable_bitField0_ & 0x00000010) == 0x00000010)) {
+ if (((mutable_bitField0_ & 0x00000020) == 0x00000020)) {
inputSpecs_ = java.util.Collections.unmodifiableList(inputSpecs_);
}
- if (((mutable_bitField0_ & 0x00000020) == 0x00000020)) {
+ if (((mutable_bitField0_ & 0x00000040) == 0x00000040)) {
outputSpecs_ = java.util.Collections.unmodifiableList(outputSpecs_);
}
- if (((mutable_bitField0_ & 0x00000040) == 0x00000040)) {
+ if (((mutable_bitField0_ & 0x00000080) == 0x00000080)) {
groupedInputSpecs_ = java.util.Collections.unmodifiableList(groupedInputSpecs_);
}
this.unknownFields = unknownFields.build();
@@ -3541,6 +3556,22 @@ public boolean hasDagName() {
}
}
+ // optional int32 dag_id = 11;
+ public static final int DAG_ID_FIELD_NUMBER = 11;
+ private int dagId_;
+ /**
+ * optional int32 dag_id = 11;
+ */
+ public boolean hasDagId() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ /**
+ * optional int32 dag_id = 11;
+ */
+ public int getDagId() {
+ return dagId_;
+ }
+
// optional string vertex_name = 3;
public static final int VERTEX_NAME_FIELD_NUMBER = 3;
private java.lang.Object vertexName_;
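Because dag_id is a proto2 optional, getDagId() silently returns the default 0 when the field was never set, so callers should gate on hasDagId(). A minimal usage sketch; the helper and its fallback argument are illustrative, not part of this patch:

// Presence-checked read of the new optional field.
static int dagIdOrDefault(
    LlapDaemonProtocolProtos.FragmentSpecProto spec, int fallback) {
  return spec.hasDagId() ? spec.getDagId() : fallback;
}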
@@ -3548,7 +3579,7 @@ public boolean hasDagName() {
* optional string vertex_name = 3;
*/
public boolean hasVertexName() {
- return ((bitField0_ & 0x00000004) == 0x00000004);
+ return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* optional string vertex_name = 3;
@@ -3591,7 +3622,7 @@ public boolean hasVertexName() {
* optional .EntityDescriptorProto processor_descriptor = 4;
*/
public boolean hasProcessorDescriptor() {
- return ((bitField0_ & 0x00000008) == 0x00000008);
+ return ((bitField0_ & 0x00000010) == 0x00000010);
}
/**
* optional .EntityDescriptorProto processor_descriptor = 4;
@@ -3721,7 +3752,7 @@ public int getGroupedInputSpecsCount() {
* optional int32 vertex_parallelism = 8;
*/
public boolean hasVertexParallelism() {
- return ((bitField0_ & 0x00000010) == 0x00000010);
+ return ((bitField0_ & 0x00000020) == 0x00000020);
}
/**
* optional int32 vertex_parallelism = 8;
@@ -3737,7 +3768,7 @@ public int getVertexParallelism() {
* optional int32 fragment_number = 9;
*/
public boolean hasFragmentNumber() {
- return ((bitField0_ & 0x00000020) == 0x00000020);
+ return ((bitField0_ & 0x00000040) == 0x00000040);
}
/**
* optional int32 fragment_number = 9;
@@ -3753,7 +3784,7 @@ public int getFragmentNumber() {
* optional int32 attempt_number = 10;
*/
public boolean hasAttemptNumber() {
- return ((bitField0_ & 0x00000040) == 0x00000040);
+ return ((bitField0_ & 0x00000080) == 0x00000080);
}
/**
* optional int32 attempt_number = 10;
@@ -3765,6 +3796,7 @@ public int getAttemptNumber() {
private void initFields() {
fragmentIdentifierString_ = "";
dagName_ = "";
+ dagId_ = 0;
vertexName_ = "";
processorDescriptor_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.getDefaultInstance();
inputSpecs_ = java.util.Collections.emptyList();
@@ -3792,10 +3824,10 @@ public void writeTo(com.google.protobuf.CodedOutputStream output)
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeBytes(2, getDagNameBytes());
}
- if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
output.writeBytes(3, getVertexNameBytes());
}
- if (((bitField0_ & 0x00000008) == 0x00000008)) {
+ if (((bitField0_ & 0x00000010) == 0x00000010)) {
output.writeMessage(4, processorDescriptor_);
}
for (int i = 0; i < inputSpecs_.size(); i++) {
@@ -3807,15 +3839,18 @@ public void writeTo(com.google.protobuf.CodedOutputStream output)
for (int i = 0; i < groupedInputSpecs_.size(); i++) {
output.writeMessage(7, groupedInputSpecs_.get(i));
}
- if (((bitField0_ & 0x00000010) == 0x00000010)) {
+ if (((bitField0_ & 0x00000020) == 0x00000020)) {
output.writeInt32(8, vertexParallelism_);
}
- if (((bitField0_ & 0x00000020) == 0x00000020)) {
+ if (((bitField0_ & 0x00000040) == 0x00000040)) {
output.writeInt32(9, fragmentNumber_);
}
- if (((bitField0_ & 0x00000040) == 0x00000040)) {
+ if (((bitField0_ & 0x00000080) == 0x00000080)) {
output.writeInt32(10, attemptNumber_);
}
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ output.writeInt32(11, dagId_);
+ }
getUnknownFields().writeTo(output);
}
@@ -3833,11 +3868,11 @@ public int getSerializedSize() {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(2, getDagNameBytes());
}
- if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(3, getVertexNameBytes());
}
- if (((bitField0_ & 0x00000008) == 0x00000008)) {
+ if (((bitField0_ & 0x00000010) == 0x00000010)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(4, processorDescriptor_);
}
@@ -3853,18 +3888,22 @@ public int getSerializedSize() {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(7, groupedInputSpecs_.get(i));
}
- if (((bitField0_ & 0x00000010) == 0x00000010)) {
+ if (((bitField0_ & 0x00000020) == 0x00000020)) {
size += com.google.protobuf.CodedOutputStream
.computeInt32Size(8, vertexParallelism_);
}
- if (((bitField0_ & 0x00000020) == 0x00000020)) {
+ if (((bitField0_ & 0x00000040) == 0x00000040)) {
size += com.google.protobuf.CodedOutputStream
.computeInt32Size(9, fragmentNumber_);
}
- if (((bitField0_ & 0x00000040) == 0x00000040)) {
+ if (((bitField0_ & 0x00000080) == 0x00000080)) {
size += com.google.protobuf.CodedOutputStream
.computeInt32Size(10, attemptNumber_);
}
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeInt32Size(11, dagId_);
+ }
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
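Most of this hunk and its neighbors are mechanical fallout: each optional field owns one presence bit in bitField0_, allocated in declaration order rather than by field number, so giving dag_id the 0x00000004 slot shifts every later field's mask up one bit. A small sketch of the idiom, with illustrative constants mirroring the shift:

final class PresenceBitsSketch {
  static final int DAG_ID         = 0x00000004; // newly claimed by dag_id
  static final int VERTEX_NAME    = 0x00000008; // was 0x00000004 before this patch
  static final int ATTEMPT_NUMBER = 0x00000080; // was 0x00000040 before this patch

  static boolean has(int bitField0, int mask) { return (bitField0 & mask) == mask; }
  static int set(int bitField0, int mask)     { return bitField0 | mask; }
  static int clear(int bitField0, int mask)   { return bitField0 & ~mask; }
}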
@@ -3898,6 +3937,11 @@ public boolean equals(final java.lang.Object obj) {
result = result && getDagName()
.equals(other.getDagName());
}
+ result = result && (hasDagId() == other.hasDagId());
+ if (hasDagId()) {
+ result = result && (getDagId()
+ == other.getDagId());
+ }
result = result && (hasVertexName() == other.hasVertexName());
if (hasVertexName()) {
result = result && getVertexName()
@@ -3950,6 +3994,10 @@ public int hashCode() {
hash = (37 * hash) + DAG_NAME_FIELD_NUMBER;
hash = (53 * hash) + getDagName().hashCode();
}
+ if (hasDagId()) {
+ hash = (37 * hash) + DAG_ID_FIELD_NUMBER;
+ hash = (53 * hash) + getDagId();
+ }
if (hasVertexName()) {
hash = (37 * hash) + VERTEX_NAME_FIELD_NUMBER;
hash = (53 * hash) + getVertexName().hashCode();
@@ -4099,38 +4147,40 @@ public Builder clear() {
bitField0_ = (bitField0_ & ~0x00000001);
dagName_ = "";
bitField0_ = (bitField0_ & ~0x00000002);
- vertexName_ = "";
+ dagId_ = 0;
bitField0_ = (bitField0_ & ~0x00000004);
+ vertexName_ = "";
+ bitField0_ = (bitField0_ & ~0x00000008);
if (processorDescriptorBuilder_ == null) {
processorDescriptor_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.getDefaultInstance();
} else {
processorDescriptorBuilder_.clear();
}
- bitField0_ = (bitField0_ & ~0x00000008);
+ bitField0_ = (bitField0_ & ~0x00000010);
if (inputSpecsBuilder_ == null) {
inputSpecs_ = java.util.Collections.emptyList();
- bitField0_ = (bitField0_ & ~0x00000010);
+ bitField0_ = (bitField0_ & ~0x00000020);
} else {
inputSpecsBuilder_.clear();
}
if (outputSpecsBuilder_ == null) {
outputSpecs_ = java.util.Collections.emptyList();
- bitField0_ = (bitField0_ & ~0x00000020);
+ bitField0_ = (bitField0_ & ~0x00000040);
} else {
outputSpecsBuilder_.clear();
}
if (groupedInputSpecsBuilder_ == null) {
groupedInputSpecs_ = java.util.Collections.emptyList();
- bitField0_ = (bitField0_ & ~0x00000040);
+ bitField0_ = (bitField0_ & ~0x00000080);
} else {
groupedInputSpecsBuilder_.clear();
}
vertexParallelism_ = 0;
- bitField0_ = (bitField0_ & ~0x00000080);
- fragmentNumber_ = 0;
bitField0_ = (bitField0_ & ~0x00000100);
- attemptNumber_ = 0;
+ fragmentNumber_ = 0;
bitField0_ = (bitField0_ & ~0x00000200);
+ attemptNumber_ = 0;
+ bitField0_ = (bitField0_ & ~0x00000400);
return this;
}
@@ -4170,53 +4220,57 @@ public Builder clone() {
if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
to_bitField0_ |= 0x00000004;
}
- result.vertexName_ = vertexName_;
+ result.dagId_ = dagId_;
if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
to_bitField0_ |= 0x00000008;
}
+ result.vertexName_ = vertexName_;
+ if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
+ to_bitField0_ |= 0x00000010;
+ }
if (processorDescriptorBuilder_ == null) {
result.processorDescriptor_ = processorDescriptor_;
} else {
result.processorDescriptor_ = processorDescriptorBuilder_.build();
}
if (inputSpecsBuilder_ == null) {
- if (((bitField0_ & 0x00000010) == 0x00000010)) {
+ if (((bitField0_ & 0x00000020) == 0x00000020)) {
inputSpecs_ = java.util.Collections.unmodifiableList(inputSpecs_);
- bitField0_ = (bitField0_ & ~0x00000010);
+ bitField0_ = (bitField0_ & ~0x00000020);
}
result.inputSpecs_ = inputSpecs_;
} else {
result.inputSpecs_ = inputSpecsBuilder_.build();
}
if (outputSpecsBuilder_ == null) {
- if (((bitField0_ & 0x00000020) == 0x00000020)) {
+ if (((bitField0_ & 0x00000040) == 0x00000040)) {
outputSpecs_ = java.util.Collections.unmodifiableList(outputSpecs_);
- bitField0_ = (bitField0_ & ~0x00000020);
+ bitField0_ = (bitField0_ & ~0x00000040);
}
result.outputSpecs_ = outputSpecs_;
} else {
result.outputSpecs_ = outputSpecsBuilder_.build();
}
if (groupedInputSpecsBuilder_ == null) {
- if (((bitField0_ & 0x00000040) == 0x00000040)) {
+ if (((bitField0_ & 0x00000080) == 0x00000080)) {
groupedInputSpecs_ = java.util.Collections.unmodifiableList(groupedInputSpecs_);
- bitField0_ = (bitField0_ & ~0x00000040);
+ bitField0_ = (bitField0_ & ~0x00000080);
}
result.groupedInputSpecs_ = groupedInputSpecs_;
} else {
result.groupedInputSpecs_ = groupedInputSpecsBuilder_.build();
}
- if (((from_bitField0_ & 0x00000080) == 0x00000080)) {
- to_bitField0_ |= 0x00000010;
- }
- result.vertexParallelism_ = vertexParallelism_;
if (((from_bitField0_ & 0x00000100) == 0x00000100)) {
to_bitField0_ |= 0x00000020;
}
- result.fragmentNumber_ = fragmentNumber_;
+ result.vertexParallelism_ = vertexParallelism_;
if (((from_bitField0_ & 0x00000200) == 0x00000200)) {
to_bitField0_ |= 0x00000040;
}
+ result.fragmentNumber_ = fragmentNumber_;
+ if (((from_bitField0_ & 0x00000400) == 0x00000400)) {
+ to_bitField0_ |= 0x00000080;
+ }
result.attemptNumber_ = attemptNumber_;
result.bitField0_ = to_bitField0_;
onBuilt();
@@ -4244,8 +4298,11 @@ public Builder mergeFrom(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtoc
dagName_ = other.dagName_;
onChanged();
}
+ if (other.hasDagId()) {
+ setDagId(other.getDagId());
+ }
if (other.hasVertexName()) {
- bitField0_ |= 0x00000004;
+ bitField0_ |= 0x00000008;
vertexName_ = other.vertexName_;
onChanged();
}
@@ -4256,7 +4313,7 @@ public Builder mergeFrom(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtoc
if (!other.inputSpecs_.isEmpty()) {
if (inputSpecs_.isEmpty()) {
inputSpecs_ = other.inputSpecs_;
- bitField0_ = (bitField0_ & ~0x00000010);
+ bitField0_ = (bitField0_ & ~0x00000020);
} else {
ensureInputSpecsIsMutable();
inputSpecs_.addAll(other.inputSpecs_);
@@ -4269,7 +4326,7 @@ public Builder mergeFrom(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtoc
inputSpecsBuilder_.dispose();
inputSpecsBuilder_ = null;
inputSpecs_ = other.inputSpecs_;
- bitField0_ = (bitField0_ & ~0x00000010);
+ bitField0_ = (bitField0_ & ~0x00000020);
inputSpecsBuilder_ =
com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
getInputSpecsFieldBuilder() : null;
@@ -4282,7 +4339,7 @@ public Builder mergeFrom(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtoc
if (!other.outputSpecs_.isEmpty()) {
if (outputSpecs_.isEmpty()) {
outputSpecs_ = other.outputSpecs_;
- bitField0_ = (bitField0_ & ~0x00000020);
+ bitField0_ = (bitField0_ & ~0x00000040);
} else {
ensureOutputSpecsIsMutable();
outputSpecs_.addAll(other.outputSpecs_);
@@ -4295,7 +4352,7 @@ public Builder mergeFrom(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtoc
outputSpecsBuilder_.dispose();
outputSpecsBuilder_ = null;
outputSpecs_ = other.outputSpecs_;
- bitField0_ = (bitField0_ & ~0x00000020);
+ bitField0_ = (bitField0_ & ~0x00000040);
outputSpecsBuilder_ =
com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
getOutputSpecsFieldBuilder() : null;
@@ -4308,7 +4365,7 @@ public Builder mergeFrom(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtoc
if (!other.groupedInputSpecs_.isEmpty()) {
if (groupedInputSpecs_.isEmpty()) {
groupedInputSpecs_ = other.groupedInputSpecs_;
- bitField0_ = (bitField0_ & ~0x00000040);
+ bitField0_ = (bitField0_ & ~0x00000080);
} else {
ensureGroupedInputSpecsIsMutable();
groupedInputSpecs_.addAll(other.groupedInputSpecs_);
@@ -4321,7 +4378,7 @@ public Builder mergeFrom(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtoc
groupedInputSpecsBuilder_.dispose();
groupedInputSpecsBuilder_ = null;
groupedInputSpecs_ = other.groupedInputSpecs_;
- bitField0_ = (bitField0_ & ~0x00000040);
+ bitField0_ = (bitField0_ & ~0x00000080);
groupedInputSpecsBuilder_ =
com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
getGroupedInputSpecsFieldBuilder() : null;
@@ -4514,13 +4571,46 @@ public Builder setDagNameBytes(
return this;
}
+ // optional int32 dag_id = 11;
+ private int dagId_ ;
+ /**
+ * optional int32 dag_id = 11;
+ */
+ public boolean hasDagId() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ /**
+ * optional int32 dag_id = 11;
+ */
+ public int getDagId() {
+ return dagId_;
+ }
+ /**
+ * optional int32 dag_id = 11;
+ */
+ public Builder setDagId(int value) {
+ bitField0_ |= 0x00000004;
+ dagId_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * optional int32 dag_id = 11;
+ */
+ public Builder clearDagId() {
+ bitField0_ = (bitField0_ & ~0x00000004);
+ dagId_ = 0;
+ onChanged();
+ return this;
+ }
+
// optional string vertex_name = 3;
private java.lang.Object vertexName_ = "";
/**
* optional string vertex_name = 3;
*/
public boolean hasVertexName() {
- return ((bitField0_ & 0x00000004) == 0x00000004);
+ return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* optional string vertex_name = 3;
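The new builder methods in context; every literal below is invented for illustration:

LlapDaemonProtocolProtos.FragmentSpecProto spec =
    LlapDaemonProtocolProtos.FragmentSpecProto.newBuilder()
        .setDagName("example-dag") // illustrative value
        .setDagId(42)              // the new field
        .setVertexName("Map 1")    // illustrative value
        .build();
assert spec.hasDagId() && spec.getDagId() == 42;
// clearDagId() resets both the stored value and its presence bit:
assert !spec.toBuilder().clearDagId().buildPartial().hasDagId();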
@@ -4560,7 +4650,7 @@ public Builder setVertexName(
if (value == null) {
throw new NullPointerException();
}
- bitField0_ |= 0x00000004;
+ bitField0_ |= 0x00000008;
vertexName_ = value;
onChanged();
return this;
@@ -4569,7 +4659,7 @@ public Builder setVertexName(
* optional string vertex_name = 3;
*/
public Builder clearVertexName() {
- bitField0_ = (bitField0_ & ~0x00000004);
+ bitField0_ = (bitField0_ & ~0x00000008);
vertexName_ = getDefaultInstance().getVertexName();
onChanged();
return this;
@@ -4582,7 +4672,7 @@ public Builder setVertexNameBytes(
if (value == null) {
throw new NullPointerException();
}
- bitField0_ |= 0x00000004;
+ bitField0_ |= 0x00000008;
vertexName_ = value;
onChanged();
return this;
@@ -4596,7 +4686,7 @@ public Builder setVertexNameBytes(
* optional .EntityDescriptorProto processor_descriptor = 4;
*/
public boolean hasProcessorDescriptor() {
- return ((bitField0_ & 0x00000008) == 0x00000008);
+ return ((bitField0_ & 0x00000010) == 0x00000010);
}
/**
* optional .EntityDescriptorProto processor_descriptor = 4;
@@ -4621,7 +4711,7 @@ public Builder setProcessorDescriptor(org.apache.hadoop.hive.llap.daemon.rpc.Lla
} else {
processorDescriptorBuilder_.setMessage(value);
}
- bitField0_ |= 0x00000008;
+ bitField0_ |= 0x00000010;
return this;
}
/**
@@ -4635,7 +4725,7 @@ public Builder setProcessorDescriptor(
} else {
processorDescriptorBuilder_.setMessage(builderForValue.build());
}
- bitField0_ |= 0x00000008;
+ bitField0_ |= 0x00000010;
return this;
}
/**
@@ -4643,7 +4733,7 @@ public Builder setProcessorDescriptor(
*/
public Builder mergeProcessorDescriptor(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto value) {
if (processorDescriptorBuilder_ == null) {
- if (((bitField0_ & 0x00000008) == 0x00000008) &&
+ if (((bitField0_ & 0x00000010) == 0x00000010) &&
processorDescriptor_ != org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.getDefaultInstance()) {
processorDescriptor_ =
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.newBuilder(processorDescriptor_).mergeFrom(value).buildPartial();
@@ -4654,7 +4744,7 @@ public Builder mergeProcessorDescriptor(org.apache.hadoop.hive.llap.daemon.rpc.L
} else {
processorDescriptorBuilder_.mergeFrom(value);
}
- bitField0_ |= 0x00000008;
+ bitField0_ |= 0x00000010;
return this;
}
/**
@@ -4667,14 +4757,14 @@ public Builder clearProcessorDescriptor() {
} else {
processorDescriptorBuilder_.clear();
}
- bitField0_ = (bitField0_ & ~0x00000008);
+ bitField0_ = (bitField0_ & ~0x00000010);
return this;
}
/**
* optional .EntityDescriptorProto processor_descriptor = 4;
*/
public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.Builder getProcessorDescriptorBuilder() {
- bitField0_ |= 0x00000008;
+ bitField0_ |= 0x00000010;
onChanged();
return getProcessorDescriptorFieldBuilder().getBuilder();
}
@@ -4709,9 +4799,9 @@ public Builder clearProcessorDescriptor() {
private java.util.List<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto> inputSpecs_ =
java.util.Collections.emptyList();
private void ensureInputSpecsIsMutable() {
- if (!((bitField0_ & 0x00000010) == 0x00000010)) {
+ if (!((bitField0_ & 0x00000020) == 0x00000020)) {
inputSpecs_ = new java.util.ArrayList<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto>(inputSpecs_);
- bitField0_ |= 0x00000010;
+ bitField0_ |= 0x00000020;
}
}
@@ -4860,7 +4950,7 @@ public Builder addAllInputSpecs(
public Builder clearInputSpecs() {
if (inputSpecsBuilder_ == null) {
inputSpecs_ = java.util.Collections.emptyList();
- bitField0_ = (bitField0_ & ~0x00000010);
+ bitField0_ = (bitField0_ & ~0x00000020);
onChanged();
} else {
inputSpecsBuilder_.clear();
@@ -4937,7 +5027,7 @@ public Builder removeInputSpecs(int index) {
inputSpecsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProtoOrBuilder>(
inputSpecs_,
- ((bitField0_ & 0x00000010) == 0x00000010),
+ ((bitField0_ & 0x00000020) == 0x00000020),
getParentForChildren(),
isClean());
inputSpecs_ = null;
@@ -4949,9 +5039,9 @@ public Builder removeInputSpecs(int index) {
private java.util.List<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto> outputSpecs_ =
java.util.Collections.emptyList();
private void ensureOutputSpecsIsMutable() {
- if (!((bitField0_ & 0x00000020) == 0x00000020)) {
+ if (!((bitField0_ & 0x00000040) == 0x00000040)) {
outputSpecs_ = new java.util.ArrayList<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto>(outputSpecs_);
- bitField0_ |= 0x00000020;
+ bitField0_ |= 0x00000040;
}
}
@@ -5100,7 +5190,7 @@ public Builder addAllOutputSpecs(
public Builder clearOutputSpecs() {
if (outputSpecsBuilder_ == null) {
outputSpecs_ = java.util.Collections.emptyList();
- bitField0_ = (bitField0_ & ~0x00000020);
+ bitField0_ = (bitField0_ & ~0x00000040);
onChanged();
} else {
outputSpecsBuilder_.clear();
@@ -5177,7 +5267,7 @@ public Builder removeOutputSpecs(int index) {
outputSpecsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProtoOrBuilder>(
outputSpecs_,
- ((bitField0_ & 0x00000020) == 0x00000020),
+ ((bitField0_ & 0x00000040) == 0x00000040),
getParentForChildren(),
isClean());
outputSpecs_ = null;
@@ -5189,9 +5279,9 @@ public Builder removeOutputSpecs(int index) {
private java.util.List<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto> groupedInputSpecs_ =
java.util.Collections.emptyList();
private void ensureGroupedInputSpecsIsMutable() {
- if (!((bitField0_ & 0x00000040) == 0x00000040)) {
+ if (!((bitField0_ & 0x00000080) == 0x00000080)) {
groupedInputSpecs_ = new java.util.ArrayList<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto>(groupedInputSpecs_);
- bitField0_ |= 0x00000040;
+ bitField0_ |= 0x00000080;
}
}
@@ -5340,7 +5430,7 @@ public Builder addAllGroupedInputSpecs(
public Builder clearGroupedInputSpecs() {
if (groupedInputSpecsBuilder_ == null) {
groupedInputSpecs_ = java.util.Collections.emptyList();
- bitField0_ = (bitField0_ & ~0x00000040);
+ bitField0_ = (bitField0_ & ~0x00000080);
onChanged();
} else {
groupedInputSpecsBuilder_.clear();
@@ -5417,7 +5507,7 @@ public Builder removeGroupedInputSpecs(int index) {
groupedInputSpecsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProtoOrBuilder>(
groupedInputSpecs_,
- ((bitField0_ & 0x00000040) == 0x00000040),
+ ((bitField0_ & 0x00000080) == 0x00000080),
getParentForChildren(),
isClean());
groupedInputSpecs_ = null;
@@ -5431,7 +5521,7 @@ public Builder removeGroupedInputSpecs(int index) {
* optional int32 vertex_parallelism = 8;
*/
public boolean hasVertexParallelism() {
- return ((bitField0_ & 0x00000080) == 0x00000080);
+ return ((bitField0_ & 0x00000100) == 0x00000100);
}
/**
* optional int32 vertex_parallelism = 8;
@@ -5443,7 +5533,7 @@ public int getVertexParallelism() {
* optional int32 vertex_parallelism = 8;
*/
public Builder setVertexParallelism(int value) {
- bitField0_ |= 0x00000080;
+ bitField0_ |= 0x00000100;
vertexParallelism_ = value;
onChanged();
return this;
@@ -5452,7 +5542,7 @@ public Builder setVertexParallelism(int value) {
* optional int32 vertex_parallelism = 8;
*/
public Builder clearVertexParallelism() {
- bitField0_ = (bitField0_ & ~0x00000080);
+ bitField0_ = (bitField0_ & ~0x00000100);
vertexParallelism_ = 0;
onChanged();
return this;
@@ -5464,7 +5554,7 @@ public Builder clearVertexParallelism() {
* optional int32 fragment_number = 9;
*/
public boolean hasFragmentNumber() {
- return ((bitField0_ & 0x00000100) == 0x00000100);
+ return ((bitField0_ & 0x00000200) == 0x00000200);
}
/**
* optional int32 fragment_number = 9;
@@ -5476,7 +5566,7 @@ public int getFragmentNumber() {
* optional int32 fragment_number = 9;
*/
public Builder setFragmentNumber(int value) {
- bitField0_ |= 0x00000100;
+ bitField0_ |= 0x00000200;
fragmentNumber_ = value;
onChanged();
return this;
@@ -5485,7 +5575,7 @@ public Builder setFragmentNumber(int value) {
* optional int32 fragment_number = 9;
*/
public Builder clearFragmentNumber() {
- bitField0_ = (bitField0_ & ~0x00000100);
+ bitField0_ = (bitField0_ & ~0x00000200);
fragmentNumber_ = 0;
onChanged();
return this;
@@ -5497,7 +5587,7 @@ public Builder clearFragmentNumber() {
* optional int32 attempt_number = 10;
*/
public boolean hasAttemptNumber() {
- return ((bitField0_ & 0x00000200) == 0x00000200);
+ return ((bitField0_ & 0x00000400) == 0x00000400);
}
/**
* optional int32 attempt_number = 10;
@@ -5509,7 +5599,7 @@ public int getAttemptNumber() {
* optional int32 attempt_number = 10;
*/
public Builder setAttemptNumber(int value) {
- bitField0_ |= 0x00000200;
+ bitField0_ |= 0x00000400;
attemptNumber_ = value;
onChanged();
return this;
@@ -5518,7 +5608,7 @@ public Builder setAttemptNumber(int value) {
* optional int32 attempt_number = 10;
*/
public Builder clearAttemptNumber() {
- bitField0_ = (bitField0_ & ~0x00000200);
+ bitField0_ = (bitField0_ & ~0x00000400);
attemptNumber_ = 0;
onChanged();
return this;
@@ -6419,76 +6509,675 @@ public Builder clearCurrentAttemptStartTime() {
// @@protoc_insertion_point(class_scope:FragmentRuntimeInfo)
}
- public interface SubmitWorkRequestProtoOrBuilder
+ public interface QueryIdentifierProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
- // optional string container_id_string = 1;
- /**
- * optional string container_id_string = 1;
- */
- boolean hasContainerIdString();
- /**
- * optional string container_id_string = 1;
- */
- java.lang.String getContainerIdString();
- /**
- * optional string container_id_string = 1;
- */
- com.google.protobuf.ByteString
- getContainerIdStringBytes();
-
- // optional string am_host = 2;
+ // optional string app_identifier = 1;
/**
- * optional string am_host = 2;
+ * optional string app_identifier = 1;
*/
- boolean hasAmHost();
+ boolean hasAppIdentifier();
/**
- * optional string am_host = 2;
+ * optional string app_identifier = 1;
*/
- java.lang.String getAmHost();
+ java.lang.String getAppIdentifier();
/**
- * optional string am_host = 2;
+ * optional string app_identifier = 1;
*/
com.google.protobuf.ByteString
- getAmHostBytes();
+ getAppIdentifierBytes();
- // optional int32 am_port = 3;
+ // optional int32 dag_identifier = 2;
/**
- * optional int32 am_port = 3;
+ * optional int32 dag_identifier = 2;
*/
- boolean hasAmPort();
+ boolean hasDagIdentifier();
/**
- * optional int32 am_port = 3;
+ * optional int32 dag_identifier = 2;
*/
- int getAmPort();
+ int getDagIdentifier();
+ }
+ /**
+ * Protobuf type {@code QueryIdentifierProto}
+ */
+ public static final class QueryIdentifierProto extends
+ com.google.protobuf.GeneratedMessage
+ implements QueryIdentifierProtoOrBuilder {
+ // Use QueryIdentifierProto.newBuilder() to construct.
+ private QueryIdentifierProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private QueryIdentifierProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
- // optional string token_identifier = 4;
- /**
- * optional string token_identifier = 4;
- */
- boolean hasTokenIdentifier();
- /**
- * optional string token_identifier = 4;
- */
- java.lang.String getTokenIdentifier();
- /**
- * optional string token_identifier = 4;
- */
- com.google.protobuf.ByteString
- getTokenIdentifierBytes();
+ private static final QueryIdentifierProto defaultInstance;
+ public static QueryIdentifierProto getDefaultInstance() {
+ return defaultInstance;
+ }
- // optional bytes credentials_binary = 5;
- /**
- * optional bytes credentials_binary = 5;
- */
- boolean hasCredentialsBinary();
- /**
- * optional bytes credentials_binary = 5;
- */
- com.google.protobuf.ByteString getCredentialsBinary();
+ public QueryIdentifierProto getDefaultInstanceForType() {
+ return defaultInstance;
+ }
- // optional string user = 6;
- /**
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private QueryIdentifierProto(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 10: {
+ bitField0_ |= 0x00000001;
+ appIdentifier_ = input.readBytes();
+ break;
+ }
+ case 16: {
+ bitField0_ |= 0x00000002;
+ dagIdentifier_ = input.readInt32();
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_QueryIdentifierProto_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_QueryIdentifierProto_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.class, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser<QueryIdentifierProto> PARSER =
+ new com.google.protobuf.AbstractParser<QueryIdentifierProto>() {
+ public QueryIdentifierProto parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new QueryIdentifierProto(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser<QueryIdentifierProto> getParserForType() {
+ return PARSER;
+ }
+
+ private int bitField0_;
+ // optional string app_identifier = 1;
+ public static final int APP_IDENTIFIER_FIELD_NUMBER = 1;
+ private java.lang.Object appIdentifier_;
+ /**
+ * optional string app_identifier = 1;
+ */
+ public boolean hasAppIdentifier() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * optional string app_identifier = 1;
+ */
+ public java.lang.String getAppIdentifier() {
+ java.lang.Object ref = appIdentifier_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
+ appIdentifier_ = s;
+ }
+ return s;
+ }
+ }
+ /**
+ * optional string app_identifier = 1;
+ */
+ public com.google.protobuf.ByteString
+ getAppIdentifierBytes() {
+ java.lang.Object ref = appIdentifier_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ appIdentifier_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ // optional int32 dag_identifier = 2;
+ public static final int DAG_IDENTIFIER_FIELD_NUMBER = 2;
+ private int dagIdentifier_;
+ /**
+ * optional int32 dag_identifier = 2;
+ */
+ public boolean hasDagIdentifier() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * optional int32 dag_identifier = 2;
+ */
+ public int getDagIdentifier() {
+ return dagIdentifier_;
+ }
+
+ private void initFields() {
+ appIdentifier_ = "";
+ dagIdentifier_ = 0;
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeBytes(1, getAppIdentifierBytes());
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeInt32(2, dagIdentifier_);
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(1, getAppIdentifierBytes());
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeInt32Size(2, dagIdentifier_);
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto other = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto) obj;
+
+ boolean result = true;
+ result = result && (hasAppIdentifier() == other.hasAppIdentifier());
+ if (hasAppIdentifier()) {
+ result = result && getAppIdentifier()
+ .equals(other.getAppIdentifier());
+ }
+ result = result && (hasDagIdentifier() == other.hasDagIdentifier());
+ if (hasDagIdentifier()) {
+ result = result && (getDagIdentifier()
+ == other.getDagIdentifier());
+ }
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ private int memoizedHashCode = 0;
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (hasAppIdentifier()) {
+ hash = (37 * hash) + APP_IDENTIFIER_FIELD_NUMBER;
+ hash = (53 * hash) + getAppIdentifier().hashCode();
+ }
+ if (hasDagIdentifier()) {
+ hash = (37 * hash) + DAG_IDENTIFIER_FIELD_NUMBER;
+ hash = (53 * hash) + getDagIdentifier();
+ }
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code QueryIdentifierProto}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProtoOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_QueryIdentifierProto_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_QueryIdentifierProto_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.class, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.Builder.class);
+ }
+
+ // Construct using org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ appIdentifier_ = "";
+ bitField0_ = (bitField0_ & ~0x00000001);
+ dagIdentifier_ = 0;
+ bitField0_ = (bitField0_ & ~0x00000002);
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_QueryIdentifierProto_descriptor;
+ }
+
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto getDefaultInstanceForType() {
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto build() {
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto buildPartial() {
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto result = new org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ result.appIdentifier_ = appIdentifier_;
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+ to_bitField0_ |= 0x00000002;
+ }
+ result.dagIdentifier_ = dagIdentifier_;
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto) {
+ return mergeFrom((org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto other) {
+ if (other == org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.getDefaultInstance()) return this;
+ if (other.hasAppIdentifier()) {
+ bitField0_ |= 0x00000001;
+ appIdentifier_ = other.appIdentifier_;
+ onChanged();
+ }
+ if (other.hasDagIdentifier()) {
+ setDagIdentifier(other.getDagIdentifier());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ private int bitField0_;
+
+ // optional string app_identifier = 1;
+ private java.lang.Object appIdentifier_ = "";
+ /**
+ * optional string app_identifier = 1;
+ */
+ public boolean hasAppIdentifier() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * optional string app_identifier = 1;
+ */
+ public java.lang.String getAppIdentifier() {
+ java.lang.Object ref = appIdentifier_;
+ if (!(ref instanceof java.lang.String)) {
+ java.lang.String s = ((com.google.protobuf.ByteString) ref)
+ .toStringUtf8();
+ appIdentifier_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * optional string app_identifier = 1;
+ */
+ public com.google.protobuf.ByteString
+ getAppIdentifierBytes() {
+ java.lang.Object ref = appIdentifier_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ appIdentifier_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * optional string app_identifier = 1;
+ */
+ public Builder setAppIdentifier(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ appIdentifier_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * optional string app_identifier = 1;
+ */
+ public Builder clearAppIdentifier() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ appIdentifier_ = getDefaultInstance().getAppIdentifier();
+ onChanged();
+ return this;
+ }
+ /**
+ * optional string app_identifier = 1;
+ */
+ public Builder setAppIdentifierBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ appIdentifier_ = value;
+ onChanged();
+ return this;
+ }
+
+ // optional int32 dag_identifier = 2;
+ private int dagIdentifier_ ;
+ /**
+ * optional int32 dag_identifier = 2;
+ */
+ public boolean hasDagIdentifier() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * optional int32 dag_identifier = 2;
+ */
+ public int getDagIdentifier() {
+ return dagIdentifier_;
+ }
+ /**
+ * optional int32 dag_identifier = 2;
+ */
+ public Builder setDagIdentifier(int value) {
+ bitField0_ |= 0x00000002;
+ dagIdentifier_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * optional int32 dag_identifier = 2;
+ */
+ public Builder clearDagIdentifier() {
+ bitField0_ = (bitField0_ & ~0x00000002);
+ dagIdentifier_ = 0;
+ onChanged();
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:QueryIdentifierProto)
+ }
+
+ static {
+ defaultInstance = new QueryIdentifierProto(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:QueryIdentifierProto)
+ }
+
+ public interface SubmitWorkRequestProtoOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // optional string container_id_string = 1;
+ /**
+ * optional string container_id_string = 1;
+ */
+ boolean hasContainerIdString();
+ /**
+ * optional string container_id_string = 1;
+ */
+ java.lang.String getContainerIdString();
+ /**
+ * optional string container_id_string = 1;
+ */
+ com.google.protobuf.ByteString
+ getContainerIdStringBytes();
+
+ // optional string am_host = 2;
+ /**
+ * optional string am_host = 2;
+ */
+ boolean hasAmHost();
+ /**
+ * optional string am_host = 2;
+ */
+ java.lang.String getAmHost();
+ /**
+ * optional string am_host = 2;
+ */
+ com.google.protobuf.ByteString
+ getAmHostBytes();
+
+ // optional int32 am_port = 3;
+ /**
+ * optional int32 am_port = 3;
+ */
+ boolean hasAmPort();
+ /**
+ * optional int32 am_port = 3;
+ */
+ int getAmPort();
+
+ // optional string token_identifier = 4;
+ /**
+ * optional string token_identifier = 4;
+ */
+ boolean hasTokenIdentifier();
+ /**
+ * optional string token_identifier = 4;
+ */
+ java.lang.String getTokenIdentifier();
+ /**
+ * optional string token_identifier = 4;
+ */
+ com.google.protobuf.ByteString
+ getTokenIdentifierBytes();
+
+ // optional bytes credentials_binary = 5;
+ /**
+ * optional bytes credentials_binary = 5;
+ */
+ boolean hasCredentialsBinary();
+ /**
+ * optional bytes credentials_binary = 5;
+ */
+ com.google.protobuf.ByteString getCredentialsBinary();
+
+ // optional string user = 6;
+ /**
* optional string user = 6;
*/
boolean hasUser();
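QueryIdentifierProto pairs the application identifier with a per-application DAG index so later messages can address a DAG without carrying its name. A round-trip sketch with invented values:

LlapDaemonProtocolProtos.QueryIdentifierProto queryId =
    LlapDaemonProtocolProtos.QueryIdentifierProto.newBuilder()
        .setAppIdentifier("application_1465000000000_0001") // hypothetical app id
        .setDagIdentifier(7)
        .build();
LlapDaemonProtocolProtos.QueryIdentifierProto parsed =
    LlapDaemonProtocolProtos.QueryIdentifierProto.parseFrom(queryId.toByteArray());
assert parsed.getDagIdentifier() == 7;
assert parsed.getAppIdentifier().equals(queryId.getAppIdentifier());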
@@ -8604,20 +9293,19 @@ public Builder mergeFrom(
public interface SourceStateUpdatedRequestProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
- // optional string dag_name = 1;
+ // optional .QueryIdentifierProto query_identifier = 1;
/**
- * optional string dag_name = 1;
+ * optional .QueryIdentifierProto query_identifier = 1;
*/
- boolean hasDagName();
+ boolean hasQueryIdentifier();
/**
- * optional string dag_name = 1;
+ * optional .QueryIdentifierProto query_identifier = 1;
*/
- java.lang.String getDagName();
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto getQueryIdentifier();
/**
- * optional string dag_name = 1;
+ * optional .QueryIdentifierProto query_identifier = 1;
*/
- com.google.protobuf.ByteString
- getDagNameBytes();
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProtoOrBuilder getQueryIdentifierOrBuilder();
// optional string src_name = 2;
/**
@@ -8696,8 +9384,16 @@ private SourceStateUpdatedRequestProto(
break;
}
case 10: {
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.Builder subBuilder = null;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ subBuilder = queryIdentifier_.toBuilder();
+ }
+ queryIdentifier_ = input.readMessage(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.PARSER, extensionRegistry);
+ if (subBuilder != null) {
+ subBuilder.mergeFrom(queryIdentifier_);
+ queryIdentifier_ = subBuilder.buildPartial();
+ }
bitField0_ |= 0x00000001;
- dagName_ = input.readBytes();
break;
}
case 18: {
@@ -8756,47 +9452,26 @@ public SourceStateUpdatedRequestProto parsePartialFrom(
}
private int bitField0_;
- // optional string dag_name = 1;
- public static final int DAG_NAME_FIELD_NUMBER = 1;
- private java.lang.Object dagName_;
+ // optional .QueryIdentifierProto query_identifier = 1;
+ public static final int QUERY_IDENTIFIER_FIELD_NUMBER = 1;
+ private org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto queryIdentifier_;
/**
- * optional string dag_name = 1;
+ * optional .QueryIdentifierProto query_identifier = 1;
*/
- public boolean hasDagName() {
+ public boolean hasQueryIdentifier() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
- * optional string dag_name = 1;
+ * optional .QueryIdentifierProto query_identifier = 1;
*/
- public java.lang.String getDagName() {
- java.lang.Object ref = dagName_;
- if (ref instanceof java.lang.String) {
- return (java.lang.String) ref;
- } else {
- com.google.protobuf.ByteString bs =
- (com.google.protobuf.ByteString) ref;
- java.lang.String s = bs.toStringUtf8();
- if (bs.isValidUtf8()) {
- dagName_ = s;
- }
- return s;
- }
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto getQueryIdentifier() {
+ return queryIdentifier_;
}
/**
- * optional string dag_name = 1;
+ * optional .QueryIdentifierProto query_identifier = 1;
*/
- public com.google.protobuf.ByteString
- getDagNameBytes() {
- java.lang.Object ref = dagName_;
- if (ref instanceof java.lang.String) {
- com.google.protobuf.ByteString b =
- com.google.protobuf.ByteString.copyFromUtf8(
- (java.lang.String) ref);
- dagName_ = b;
- return b;
- } else {
- return (com.google.protobuf.ByteString) ref;
- }
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProtoOrBuilder getQueryIdentifierOrBuilder() {
+ return queryIdentifier_;
}
// optional string src_name = 2;
@@ -8859,7 +9534,7 @@ public boolean hasState() {
}
private void initFields() {
- dagName_ = "";
+ queryIdentifier_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.getDefaultInstance();
srcName_ = "";
state_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateProto.S_SUCCEEDED;
}
@@ -8876,7 +9551,7 @@ public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
- output.writeBytes(1, getDagNameBytes());
+ output.writeMessage(1, queryIdentifier_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeBytes(2, getSrcNameBytes());
@@ -8895,7 +9570,7 @@ public int getSerializedSize() {
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
- .computeBytesSize(1, getDagNameBytes());
+ .computeMessageSize(1, queryIdentifier_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
@@ -8928,10 +9603,10 @@ public boolean equals(final java.lang.Object obj) {
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedRequestProto other = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedRequestProto) obj;
boolean result = true;
- result = result && (hasDagName() == other.hasDagName());
- if (hasDagName()) {
- result = result && getDagName()
- .equals(other.getDagName());
+ result = result && (hasQueryIdentifier() == other.hasQueryIdentifier());
+ if (hasQueryIdentifier()) {
+ result = result && getQueryIdentifier()
+ .equals(other.getQueryIdentifier());
}
result = result && (hasSrcName() == other.hasSrcName());
if (hasSrcName()) {
@@ -8956,9 +9631,9 @@ public int hashCode() {
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
- if (hasDagName()) {
- hash = (37 * hash) + DAG_NAME_FIELD_NUMBER;
- hash = (53 * hash) + getDagName().hashCode();
+ if (hasQueryIdentifier()) {
+ hash = (37 * hash) + QUERY_IDENTIFIER_FIELD_NUMBER;
+ hash = (53 * hash) + getQueryIdentifier().hashCode();
}
if (hasSrcName()) {
hash = (37 * hash) + SRC_NAME_FIELD_NUMBER;
@@ -9069,6 +9744,7 @@ private Builder(
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ getQueryIdentifierFieldBuilder();
}
}
private static Builder create() {
@@ -9077,7 +9753,11 @@ private static Builder create() {
public Builder clear() {
super.clear();
- dagName_ = "";
+ if (queryIdentifierBuilder_ == null) {
+ queryIdentifier_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.getDefaultInstance();
+ } else {
+ queryIdentifierBuilder_.clear();
+ }
bitField0_ = (bitField0_ & ~0x00000001);
srcName_ = "";
bitField0_ = (bitField0_ & ~0x00000002);
@@ -9114,7 +9794,11 @@ public Builder clone() {
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
- result.dagName_ = dagName_;
+ if (queryIdentifierBuilder_ == null) {
+ result.queryIdentifier_ = queryIdentifier_;
+ } else {
+ result.queryIdentifier_ = queryIdentifierBuilder_.build();
+ }
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
@@ -9139,10 +9823,8 @@ public Builder mergeFrom(com.google.protobuf.Message other) {
public Builder mergeFrom(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedRequestProto other) {
if (other == org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedRequestProto.getDefaultInstance()) return this;
- if (other.hasDagName()) {
- bitField0_ |= 0x00000001;
- dagName_ = other.dagName_;
- onChanged();
+ if (other.hasQueryIdentifier()) {
+ mergeQueryIdentifier(other.getQueryIdentifier());
}
if (other.hasSrcName()) {
bitField0_ |= 0x00000002;
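SourceStateUpdatedRequestProto now nests a QueryIdentifierProto where it previously carried a bare dag_name string. A usage sketch of the replacement; setSrcName and setState are the pre-existing generated setters, and all values are invented:

LlapDaemonProtocolProtos.SourceStateUpdatedRequestProto req =
    LlapDaemonProtocolProtos.SourceStateUpdatedRequestProto.newBuilder()
        .setQueryIdentifier(
            LlapDaemonProtocolProtos.QueryIdentifierProto.newBuilder()
                .setAppIdentifier("application_1465000000000_0001") // invented
                .setDagIdentifier(7))   // builder overload of setQueryIdentifier
        .setSrcName("Map 1")            // invented source vertex name
        .setState(LlapDaemonProtocolProtos.SourceStateProto.S_SUCCEEDED)
        .build();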
@@ -9179,78 +9861,121 @@ public Builder mergeFrom(
}
private int bitField0_;
- // optional string dag_name = 1;
- private java.lang.Object dagName_ = "";
+ // optional .QueryIdentifierProto query_identifier = 1;
+ private org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto queryIdentifier_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.getDefaultInstance();
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProtoOrBuilder> queryIdentifierBuilder_;
/**
- * optional string dag_name = 1;
+ * optional .QueryIdentifierProto query_identifier = 1;
*/
- public boolean hasDagName() {
+ public boolean hasQueryIdentifier() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
- * optional string dag_name = 1;
+ * optional .QueryIdentifierProto query_identifier = 1;
*/
- public java.lang.String getDagName() {
- java.lang.Object ref = dagName_;
- if (!(ref instanceof java.lang.String)) {
- java.lang.String s = ((com.google.protobuf.ByteString) ref)
- .toStringUtf8();
- dagName_ = s;
- return s;
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto getQueryIdentifier() {
+ if (queryIdentifierBuilder_ == null) {
+ return queryIdentifier_;
} else {
- return (java.lang.String) ref;
+ return queryIdentifierBuilder_.getMessage();
}
}
/**
- * optional string dag_name = 1;
+ * optional .QueryIdentifierProto query_identifier = 1;
*/
- public com.google.protobuf.ByteString
- getDagNameBytes() {
- java.lang.Object ref = dagName_;
- if (ref instanceof String) {
- com.google.protobuf.ByteString b =
- com.google.protobuf.ByteString.copyFromUtf8(
- (java.lang.String) ref);
- dagName_ = b;
- return b;
+ public Builder setQueryIdentifier(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto value) {
+ if (queryIdentifierBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ queryIdentifier_ = value;
+ onChanged();
} else {
- return (com.google.protobuf.ByteString) ref;
+ queryIdentifierBuilder_.setMessage(value);
}
+ bitField0_ |= 0x00000001;
+ return this;
}
/**
- * optional string dag_name = 1;
+ * optional .QueryIdentifierProto query_identifier = 1;
*/
- public Builder setDagName(
- java.lang.String value) {
- if (value == null) {
- throw new NullPointerException();
- }
- bitField0_ |= 0x00000001;
- dagName_ = value;
- onChanged();
+ public Builder setQueryIdentifier(
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.Builder builderForValue) {
+ if (queryIdentifierBuilder_ == null) {
+ queryIdentifier_ = builderForValue.build();
+ onChanged();
+ } else {
+ queryIdentifierBuilder_.setMessage(builderForValue.build());
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ /**
+ * optional .QueryIdentifierProto query_identifier = 1;
+ */
+ public Builder mergeQueryIdentifier(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto value) {
+ if (queryIdentifierBuilder_ == null) {
+ if (((bitField0_ & 0x00000001) == 0x00000001) &&
+ queryIdentifier_ != org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.getDefaultInstance()) {
+ queryIdentifier_ =
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.newBuilder(queryIdentifier_).mergeFrom(value).buildPartial();
+ } else {
+ queryIdentifier_ = value;
+ }
+ onChanged();
+ } else {
+ queryIdentifierBuilder_.mergeFrom(value);
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ /**
+ * optional .QueryIdentifierProto query_identifier = 1;
+ */
+ public Builder clearQueryIdentifier() {
+ if (queryIdentifierBuilder_ == null) {
+ queryIdentifier_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.getDefaultInstance();
+ onChanged();
+ } else {
+ queryIdentifierBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
/**
- * optional string dag_name = 1;
+ * optional .QueryIdentifierProto query_identifier = 1;
*/
- public Builder clearDagName() {
- bitField0_ = (bitField0_ & ~0x00000001);
- dagName_ = getDefaultInstance().getDagName();
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.Builder getQueryIdentifierBuilder() {
+ bitField0_ |= 0x00000001;
onChanged();
- return this;
+ return getQueryIdentifierFieldBuilder().getBuilder();
}
/**
- * optional string dag_name = 1;
+ * optional .QueryIdentifierProto query_identifier = 1;
*/
- public Builder setDagNameBytes(
- com.google.protobuf.ByteString value) {
- if (value == null) {
- throw new NullPointerException();
- }
- bitField0_ |= 0x00000001;
- dagName_ = value;
- onChanged();
- return this;
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProtoOrBuilder getQueryIdentifierOrBuilder() {
+ if (queryIdentifierBuilder_ != null) {
+ return queryIdentifierBuilder_.getMessageOrBuilder();
+ } else {
+ return queryIdentifier_;
+ }
+ }
+ /**
+ * optional .QueryIdentifierProto query_identifier = 1;
+ */
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProtoOrBuilder>
+ getQueryIdentifierFieldBuilder() {
+ if (queryIdentifierBuilder_ == null) {
+ queryIdentifierBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProtoOrBuilder>(
+ queryIdentifier_,
+ getParentForChildren(),
+ isClean());
+ queryIdentifier_ = null;
+ }
+ return queryIdentifierBuilder_;
}
// optional string src_name = 2;
@@ -9730,28 +10455,27 @@ public Builder mergeFrom(
com.google.protobuf.ByteString
getQueryIdBytes();
- // optional string dag_name = 2;
+ // optional .QueryIdentifierProto query_identifier = 2;
/**
- * optional string dag_name = 2;
+ * optional .QueryIdentifierProto query_identifier = 2;
*/
- boolean hasDagName();
+ boolean hasQueryIdentifier();
/**
- * optional string dag_name = 2;
+ * optional .QueryIdentifierProto query_identifier = 2;
*/
- java.lang.String getDagName();
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto getQueryIdentifier();
/**
- * optional string dag_name = 2;
+ * optional .QueryIdentifierProto query_identifier = 2;
*/
- com.google.protobuf.ByteString
- getDagNameBytes();
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProtoOrBuilder getQueryIdentifierOrBuilder();
- // optional int64 delete_delay = 3 [default = 0];
+ // optional int64 delete_delay = 4 [default = 0];
/**
- * optional int64 delete_delay = 3 [default = 0];
+ * optional int64 delete_delay = 4 [default = 0];
*/
boolean hasDeleteDelay();
/**
- * optional int64 delete_delay = 3 [default = 0];
+ * optional int64 delete_delay = 4 [default = 0];
*/
long getDeleteDelay();
}
@@ -9812,11 +10536,19 @@ private QueryCompleteRequestProto(
break;
}
case 18: {
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.Builder subBuilder = null;
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ subBuilder = queryIdentifier_.toBuilder();
+ }
+ queryIdentifier_ = input.readMessage(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.PARSER, extensionRegistry);
+ if (subBuilder != null) {
+ subBuilder.mergeFrom(queryIdentifier_);
+ queryIdentifier_ = subBuilder.buildPartial();
+ }
bitField0_ |= 0x00000002;
- dagName_ = input.readBytes();
break;
}
- case 24: {
+ case 32: {
bitField0_ |= 0x00000004;
deleteDelay_ = input.readInt64();
break;
@@ -9904,60 +10636,39 @@ public boolean hasQueryId() {
}
}
- // optional string dag_name = 2;
- public static final int DAG_NAME_FIELD_NUMBER = 2;
- private java.lang.Object dagName_;
+ // optional .QueryIdentifierProto query_identifier = 2;
+ public static final int QUERY_IDENTIFIER_FIELD_NUMBER = 2;
+ private org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto queryIdentifier_;
/**
- * optional string dag_name = 2;
+ * optional .QueryIdentifierProto query_identifier = 2;
*/
- public boolean hasDagName() {
+ public boolean hasQueryIdentifier() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
- * optional string dag_name = 2;
+ * optional .QueryIdentifierProto query_identifier = 2;
*/
- public java.lang.String getDagName() {
- java.lang.Object ref = dagName_;
- if (ref instanceof java.lang.String) {
- return (java.lang.String) ref;
- } else {
- com.google.protobuf.ByteString bs =
- (com.google.protobuf.ByteString) ref;
- java.lang.String s = bs.toStringUtf8();
- if (bs.isValidUtf8()) {
- dagName_ = s;
- }
- return s;
- }
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto getQueryIdentifier() {
+ return queryIdentifier_;
}
/**
- * optional string dag_name = 2;
+ * optional .QueryIdentifierProto query_identifier = 2;
*/
- public com.google.protobuf.ByteString
- getDagNameBytes() {
- java.lang.Object ref = dagName_;
- if (ref instanceof java.lang.String) {
- com.google.protobuf.ByteString b =
- com.google.protobuf.ByteString.copyFromUtf8(
- (java.lang.String) ref);
- dagName_ = b;
- return b;
- } else {
- return (com.google.protobuf.ByteString) ref;
- }
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProtoOrBuilder getQueryIdentifierOrBuilder() {
+ return queryIdentifier_;
}
- // optional int64 delete_delay = 3 [default = 0];
- public static final int DELETE_DELAY_FIELD_NUMBER = 3;
+ // optional int64 delete_delay = 4 [default = 0];
+ public static final int DELETE_DELAY_FIELD_NUMBER = 4;
private long deleteDelay_;
/**
- * optional int64 delete_delay = 3 [default = 0];
+ * optional int64 delete_delay = 4 [default = 0];
*/
public boolean hasDeleteDelay() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
- * optional int64 delete_delay = 3 [default = 0];
+ * optional int64 delete_delay = 4 [default = 0];
*/
public long getDeleteDelay() {
return deleteDelay_;
@@ -9965,7 +10676,7 @@ public long getDeleteDelay() {
private void initFields() {
queryId_ = "";
- dagName_ = "";
+ queryIdentifier_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.getDefaultInstance();
deleteDelay_ = 0L;
}
private byte memoizedIsInitialized = -1;
@@ -9984,10 +10695,10 @@ public void writeTo(com.google.protobuf.CodedOutputStream output)
output.writeBytes(1, getQueryIdBytes());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
- output.writeBytes(2, getDagNameBytes());
+ output.writeMessage(2, queryIdentifier_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
- output.writeInt64(3, deleteDelay_);
+ output.writeInt64(4, deleteDelay_);
}
getUnknownFields().writeTo(output);
}
@@ -10004,11 +10715,11 @@ public int getSerializedSize() {
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
- .computeBytesSize(2, getDagNameBytes());
+ .computeMessageSize(2, queryIdentifier_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += com.google.protobuf.CodedOutputStream
- .computeInt64Size(3, deleteDelay_);
+ .computeInt64Size(4, deleteDelay_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
@@ -10038,10 +10749,10 @@ public boolean equals(final java.lang.Object obj) {
result = result && getQueryId()
.equals(other.getQueryId());
}
- result = result && (hasDagName() == other.hasDagName());
- if (hasDagName()) {
- result = result && getDagName()
- .equals(other.getDagName());
+ result = result && (hasQueryIdentifier() == other.hasQueryIdentifier());
+ if (hasQueryIdentifier()) {
+ result = result && getQueryIdentifier()
+ .equals(other.getQueryIdentifier());
}
result = result && (hasDeleteDelay() == other.hasDeleteDelay());
if (hasDeleteDelay()) {
@@ -10065,9 +10776,9 @@ public int hashCode() {
hash = (37 * hash) + QUERY_ID_FIELD_NUMBER;
hash = (53 * hash) + getQueryId().hashCode();
}
- if (hasDagName()) {
- hash = (37 * hash) + DAG_NAME_FIELD_NUMBER;
- hash = (53 * hash) + getDagName().hashCode();
+ if (hasQueryIdentifier()) {
+ hash = (37 * hash) + QUERY_IDENTIFIER_FIELD_NUMBER;
+ hash = (53 * hash) + getQueryIdentifier().hashCode();
}
if (hasDeleteDelay()) {
hash = (37 * hash) + DELETE_DELAY_FIELD_NUMBER;
@@ -10174,6 +10885,7 @@ private Builder(
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ getQueryIdentifierFieldBuilder();
}
}
private static Builder create() {
@@ -10184,7 +10896,11 @@ public Builder clear() {
super.clear();
queryId_ = "";
bitField0_ = (bitField0_ & ~0x00000001);
- dagName_ = "";
+ if (queryIdentifierBuilder_ == null) {
+ queryIdentifier_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.getDefaultInstance();
+ } else {
+ queryIdentifierBuilder_.clear();
+ }
bitField0_ = (bitField0_ & ~0x00000002);
deleteDelay_ = 0L;
bitField0_ = (bitField0_ & ~0x00000004);
@@ -10223,7 +10939,11 @@ public Builder clone() {
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
- result.dagName_ = dagName_;
+ if (queryIdentifierBuilder_ == null) {
+ result.queryIdentifier_ = queryIdentifier_;
+ } else {
+ result.queryIdentifier_ = queryIdentifierBuilder_.build();
+ }
if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
to_bitField0_ |= 0x00000004;
}
@@ -10249,10 +10969,8 @@ public Builder mergeFrom(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtoc
queryId_ = other.queryId_;
onChanged();
}
- if (other.hasDagName()) {
- bitField0_ |= 0x00000002;
- dagName_ = other.dagName_;
- onChanged();
+ if (other.hasQueryIdentifier()) {
+ mergeQueryIdentifier(other.getQueryIdentifier());
}
if (other.hasDeleteDelay()) {
setDeleteDelay(other.getDeleteDelay());
@@ -10358,96 +11076,139 @@ public Builder setQueryIdBytes(
return this;
}
- // optional string dag_name = 2;
- private java.lang.Object dagName_ = "";
+ // optional .QueryIdentifierProto query_identifier = 2;
+ private org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto queryIdentifier_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.getDefaultInstance();
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProtoOrBuilder> queryIdentifierBuilder_;
/**
- * optional string dag_name = 2;
+ * optional .QueryIdentifierProto query_identifier = 2;
*/
- public boolean hasDagName() {
+ public boolean hasQueryIdentifier() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
- * optional string dag_name = 2;
+ * optional .QueryIdentifierProto query_identifier = 2;
*/
- public java.lang.String getDagName() {
- java.lang.Object ref = dagName_;
- if (!(ref instanceof java.lang.String)) {
- java.lang.String s = ((com.google.protobuf.ByteString) ref)
- .toStringUtf8();
- dagName_ = s;
- return s;
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto getQueryIdentifier() {
+ if (queryIdentifierBuilder_ == null) {
+ return queryIdentifier_;
} else {
- return (java.lang.String) ref;
+ return queryIdentifierBuilder_.getMessage();
}
}
/**
- * optional string dag_name = 2;
+ * optional .QueryIdentifierProto query_identifier = 2;
*/
- public com.google.protobuf.ByteString
- getDagNameBytes() {
- java.lang.Object ref = dagName_;
- if (ref instanceof String) {
- com.google.protobuf.ByteString b =
- com.google.protobuf.ByteString.copyFromUtf8(
- (java.lang.String) ref);
- dagName_ = b;
- return b;
+ public Builder setQueryIdentifier(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto value) {
+ if (queryIdentifierBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ queryIdentifier_ = value;
+ onChanged();
} else {
- return (com.google.protobuf.ByteString) ref;
+ queryIdentifierBuilder_.setMessage(value);
}
+ bitField0_ |= 0x00000002;
+ return this;
}
/**
- * optional string dag_name = 2;
+ * optional .QueryIdentifierProto query_identifier = 2;
*/
- public Builder setDagName(
- java.lang.String value) {
- if (value == null) {
- throw new NullPointerException();
- }
- bitField0_ |= 0x00000002;
- dagName_ = value;
- onChanged();
+ public Builder setQueryIdentifier(
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.Builder builderForValue) {
+ if (queryIdentifierBuilder_ == null) {
+ queryIdentifier_ = builderForValue.build();
+ onChanged();
+ } else {
+ queryIdentifierBuilder_.setMessage(builderForValue.build());
+ }
+ bitField0_ |= 0x00000002;
return this;
}
/**
- * optional string dag_name = 2;
+ * optional .QueryIdentifierProto query_identifier = 2;
*/
- public Builder clearDagName() {
+ public Builder mergeQueryIdentifier(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto value) {
+ if (queryIdentifierBuilder_ == null) {
+ if (((bitField0_ & 0x00000002) == 0x00000002) &&
+ queryIdentifier_ != org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.getDefaultInstance()) {
+ queryIdentifier_ =
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.newBuilder(queryIdentifier_).mergeFrom(value).buildPartial();
+ } else {
+ queryIdentifier_ = value;
+ }
+ onChanged();
+ } else {
+ queryIdentifierBuilder_.mergeFrom(value);
+ }
+ bitField0_ |= 0x00000002;
+ return this;
+ }
+ /**
+ * optional .QueryIdentifierProto query_identifier = 2;
+ */
+ public Builder clearQueryIdentifier() {
+ if (queryIdentifierBuilder_ == null) {
+ queryIdentifier_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.getDefaultInstance();
+ onChanged();
+ } else {
+ queryIdentifierBuilder_.clear();
+ }
bitField0_ = (bitField0_ & ~0x00000002);
- dagName_ = getDefaultInstance().getDagName();
- onChanged();
return this;
}
/**
- * optional string dag_name = 2;
+ * optional .QueryIdentifierProto query_identifier = 2;
*/
- public Builder setDagNameBytes(
- com.google.protobuf.ByteString value) {
- if (value == null) {
- throw new NullPointerException();
- }
- bitField0_ |= 0x00000002;
- dagName_ = value;
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.Builder getQueryIdentifierBuilder() {
+ bitField0_ |= 0x00000002;
onChanged();
- return this;
+ return getQueryIdentifierFieldBuilder().getBuilder();
+ }
+ /**
+ * optional .QueryIdentifierProto query_identifier = 2;
+ */
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProtoOrBuilder getQueryIdentifierOrBuilder() {
+ if (queryIdentifierBuilder_ != null) {
+ return queryIdentifierBuilder_.getMessageOrBuilder();
+ } else {
+ return queryIdentifier_;
+ }
+ }
+ /**
+ * optional .QueryIdentifierProto query_identifier = 2;
+ */
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProtoOrBuilder>
+ getQueryIdentifierFieldBuilder() {
+ if (queryIdentifierBuilder_ == null) {
+ queryIdentifierBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProtoOrBuilder>(
+ queryIdentifier_,
+ getParentForChildren(),
+ isClean());
+ queryIdentifier_ = null;
+ }
+ return queryIdentifierBuilder_;
}
- // optional int64 delete_delay = 3 [default = 0];
+ // optional int64 delete_delay = 4 [default = 0];
private long deleteDelay_ ;
/**
- * optional int64 delete_delay = 3 [default = 0];
+ * optional int64 delete_delay = 4 [default = 0];
*/
public boolean hasDeleteDelay() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
- * optional int64 delete_delay = 3 [default = 0];
+ * optional int64 delete_delay = 4 [default = 0];
*/
public long getDeleteDelay() {
return deleteDelay_;
}
/**
- * optional int64 delete_delay = 3 [default = 0];
+ * optional int64 delete_delay = 4 [default = 0];
*/
public Builder setDeleteDelay(long value) {
bitField0_ |= 0x00000004;
@@ -10456,7 +11217,7 @@ public Builder setDeleteDelay(long value) {
return this;
}
/**
- * optional int64 delete_delay = 3 [default = 0];
+ * optional int64 delete_delay = 4 [default = 0];
*/
public Builder clearDeleteDelay() {
bitField0_ = (bitField0_ & ~0x00000004);
@@ -10817,47 +11578,31 @@ public Builder mergeFrom(
public interface TerminateFragmentRequestProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
- // optional string query_id = 1;
- /**
- * optional string query_id = 1;
- */
- boolean hasQueryId();
- /**
- * optional string query_id = 1;
- */
- java.lang.String getQueryId();
- /**
- * optional string query_id = 1;
- */
- com.google.protobuf.ByteString
- getQueryIdBytes();
-
- // optional string dag_name = 2;
+ // optional .QueryIdentifierProto query_identifier = 1;
/**
- * optional string dag_name = 2;
+ * optional .QueryIdentifierProto query_identifier = 1;
*/
- boolean hasDagName();
+ boolean hasQueryIdentifier();
/**
- * optional string dag_name = 2;
+ * optional .QueryIdentifierProto query_identifier = 1;
*/
- java.lang.String getDagName();
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto getQueryIdentifier();
/**
- * optional string dag_name = 2;
+ * optional .QueryIdentifierProto query_identifier = 1;
*/
- com.google.protobuf.ByteString
- getDagNameBytes();
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProtoOrBuilder getQueryIdentifierOrBuilder();
- // optional string fragment_identifier_string = 7;
+ // optional string fragment_identifier_string = 2;
/**
- * optional string fragment_identifier_string = 7;
+ * optional string fragment_identifier_string = 2;
*/
boolean hasFragmentIdentifierString();
/**
- * optional string fragment_identifier_string = 7;
+ * optional string fragment_identifier_string = 2;
*/
java.lang.String getFragmentIdentifierString();
/**
- * optional string fragment_identifier_string = 7;
+ * optional string fragment_identifier_string = 2;
*/
com.google.protobuf.ByteString
getFragmentIdentifierStringBytes();
@@ -10914,17 +11659,20 @@ private TerminateFragmentRequestProto(
break;
}
case 10: {
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.Builder subBuilder = null;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ subBuilder = queryIdentifier_.toBuilder();
+ }
+ queryIdentifier_ = input.readMessage(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.PARSER, extensionRegistry);
+ if (subBuilder != null) {
+ subBuilder.mergeFrom(queryIdentifier_);
+ queryIdentifier_ = subBuilder.buildPartial();
+ }
bitField0_ |= 0x00000001;
- queryId_ = input.readBytes();
break;
}
case 18: {
bitField0_ |= 0x00000002;
- dagName_ = input.readBytes();
- break;
- }
- case 58: {
- bitField0_ |= 0x00000004;
fragmentIdentifierString_ = input.readBytes();
break;
}
@@ -10962,109 +11710,45 @@ public TerminateFragmentRequestProto parsePartialFrom(
}
};
- @java.lang.Override
- public com.google.protobuf.Parser<TerminateFragmentRequestProto> getParserForType() {
- return PARSER;
- }
-
- private int bitField0_;
- // optional string query_id = 1;
- public static final int QUERY_ID_FIELD_NUMBER = 1;
- private java.lang.Object queryId_;
- /**
- * optional string query_id = 1;
- */
- public boolean hasQueryId() {
- return ((bitField0_ & 0x00000001) == 0x00000001);
- }
- /**
- * optional string query_id = 1;
- */
- public java.lang.String getQueryId() {
- java.lang.Object ref = queryId_;
- if (ref instanceof java.lang.String) {
- return (java.lang.String) ref;
- } else {
- com.google.protobuf.ByteString bs =
- (com.google.protobuf.ByteString) ref;
- java.lang.String s = bs.toStringUtf8();
- if (bs.isValidUtf8()) {
- queryId_ = s;
- }
- return s;
- }
- }
- /**
- * optional string query_id = 1;
- */
- public com.google.protobuf.ByteString
- getQueryIdBytes() {
- java.lang.Object ref = queryId_;
- if (ref instanceof java.lang.String) {
- com.google.protobuf.ByteString b =
- com.google.protobuf.ByteString.copyFromUtf8(
- (java.lang.String) ref);
- queryId_ = b;
- return b;
- } else {
- return (com.google.protobuf.ByteString) ref;
- }
+ @java.lang.Override
+ public com.google.protobuf.Parser<TerminateFragmentRequestProto> getParserForType() {
+ return PARSER;
}
- // optional string dag_name = 2;
- public static final int DAG_NAME_FIELD_NUMBER = 2;
- private java.lang.Object dagName_;
+ private int bitField0_;
+ // optional .QueryIdentifierProto query_identifier = 1;
+ public static final int QUERY_IDENTIFIER_FIELD_NUMBER = 1;
+ private org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto queryIdentifier_;
/**
- * optional string dag_name = 2;
+ * optional .QueryIdentifierProto query_identifier = 1;
*/
- public boolean hasDagName() {
- return ((bitField0_ & 0x00000002) == 0x00000002);
+ public boolean hasQueryIdentifier() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
- * optional string dag_name = 2;
+ * optional .QueryIdentifierProto query_identifier = 1;
*/
- public java.lang.String getDagName() {
- java.lang.Object ref = dagName_;
- if (ref instanceof java.lang.String) {
- return (java.lang.String) ref;
- } else {
- com.google.protobuf.ByteString bs =
- (com.google.protobuf.ByteString) ref;
- java.lang.String s = bs.toStringUtf8();
- if (bs.isValidUtf8()) {
- dagName_ = s;
- }
- return s;
- }
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto getQueryIdentifier() {
+ return queryIdentifier_;
}
/**
- * optional string dag_name = 2;
+ * optional .QueryIdentifierProto query_identifier = 1;
*/
- public com.google.protobuf.ByteString
- getDagNameBytes() {
- java.lang.Object ref = dagName_;
- if (ref instanceof java.lang.String) {
- com.google.protobuf.ByteString b =
- com.google.protobuf.ByteString.copyFromUtf8(
- (java.lang.String) ref);
- dagName_ = b;
- return b;
- } else {
- return (com.google.protobuf.ByteString) ref;
- }
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProtoOrBuilder getQueryIdentifierOrBuilder() {
+ return queryIdentifier_;
}
- // optional string fragment_identifier_string = 7;
- public static final int FRAGMENT_IDENTIFIER_STRING_FIELD_NUMBER = 7;
+ // optional string fragment_identifier_string = 2;
+ public static final int FRAGMENT_IDENTIFIER_STRING_FIELD_NUMBER = 2;
private java.lang.Object fragmentIdentifierString_;
/**
- * optional string fragment_identifier_string = 7;
+ * optional string fragment_identifier_string = 2;
*/
public boolean hasFragmentIdentifierString() {
- return ((bitField0_ & 0x00000004) == 0x00000004);
+ return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
- * optional string fragment_identifier_string = 7;
+ * optional string fragment_identifier_string = 2;
*/
public java.lang.String getFragmentIdentifierString() {
java.lang.Object ref = fragmentIdentifierString_;
@@ -11081,7 +11765,7 @@ public boolean hasFragmentIdentifierString() {
}
}
/**
- * optional string fragment_identifier_string = 7;
+ * optional string fragment_identifier_string = 2;
*/
public com.google.protobuf.ByteString
getFragmentIdentifierStringBytes() {
@@ -11098,8 +11782,7 @@ public boolean hasFragmentIdentifierString() {
}
private void initFields() {
- queryId_ = "";
- dagName_ = "";
+ queryIdentifier_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.getDefaultInstance();
fragmentIdentifierString_ = "";
}
private byte memoizedIsInitialized = -1;
@@ -11115,13 +11798,10 @@ public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
- output.writeBytes(1, getQueryIdBytes());
+ output.writeMessage(1, queryIdentifier_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
- output.writeBytes(2, getDagNameBytes());
- }
- if (((bitField0_ & 0x00000004) == 0x00000004)) {
- output.writeBytes(7, getFragmentIdentifierStringBytes());
+ output.writeBytes(2, getFragmentIdentifierStringBytes());
}
getUnknownFields().writeTo(output);
}
@@ -11134,15 +11814,11 @@ public int getSerializedSize() {
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
- .computeBytesSize(1, getQueryIdBytes());
+ .computeMessageSize(1, queryIdentifier_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
- .computeBytesSize(2, getDagNameBytes());
- }
- if (((bitField0_ & 0x00000004) == 0x00000004)) {
- size += com.google.protobuf.CodedOutputStream
- .computeBytesSize(7, getFragmentIdentifierStringBytes());
+ .computeBytesSize(2, getFragmentIdentifierStringBytes());
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
@@ -11167,15 +11843,10 @@ public boolean equals(final java.lang.Object obj) {
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto other = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto) obj;
boolean result = true;
- result = result && (hasQueryId() == other.hasQueryId());
- if (hasQueryId()) {
- result = result && getQueryId()
- .equals(other.getQueryId());
- }
- result = result && (hasDagName() == other.hasDagName());
- if (hasDagName()) {
- result = result && getDagName()
- .equals(other.getDagName());
+ result = result && (hasQueryIdentifier() == other.hasQueryIdentifier());
+ if (hasQueryIdentifier()) {
+ result = result && getQueryIdentifier()
+ .equals(other.getQueryIdentifier());
}
result = result && (hasFragmentIdentifierString() == other.hasFragmentIdentifierString());
if (hasFragmentIdentifierString()) {
@@ -11195,13 +11866,9 @@ public int hashCode() {
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
- if (hasQueryId()) {
- hash = (37 * hash) + QUERY_ID_FIELD_NUMBER;
- hash = (53 * hash) + getQueryId().hashCode();
- }
- if (hasDagName()) {
- hash = (37 * hash) + DAG_NAME_FIELD_NUMBER;
- hash = (53 * hash) + getDagName().hashCode();
+ if (hasQueryIdentifier()) {
+ hash = (37 * hash) + QUERY_IDENTIFIER_FIELD_NUMBER;
+ hash = (53 * hash) + getQueryIdentifier().hashCode();
}
if (hasFragmentIdentifierString()) {
hash = (37 * hash) + FRAGMENT_IDENTIFIER_STRING_FIELD_NUMBER;
@@ -11308,6 +11975,7 @@ private Builder(
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ getQueryIdentifierFieldBuilder();
}
}
private static Builder create() {
@@ -11316,12 +11984,14 @@ private static Builder create() {
public Builder clear() {
super.clear();
- queryId_ = "";
+ if (queryIdentifierBuilder_ == null) {
+ queryIdentifier_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.getDefaultInstance();
+ } else {
+ queryIdentifierBuilder_.clear();
+ }
bitField0_ = (bitField0_ & ~0x00000001);
- dagName_ = "";
- bitField0_ = (bitField0_ & ~0x00000002);
fragmentIdentifierString_ = "";
- bitField0_ = (bitField0_ & ~0x00000004);
+ bitField0_ = (bitField0_ & ~0x00000002);
return this;
}
@@ -11353,14 +12023,14 @@ public Builder clone() {
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
- result.queryId_ = queryId_;
+ if (queryIdentifierBuilder_ == null) {
+ result.queryIdentifier_ = queryIdentifier_;
+ } else {
+ result.queryIdentifier_ = queryIdentifierBuilder_.build();
+ }
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
- result.dagName_ = dagName_;
- if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
- to_bitField0_ |= 0x00000004;
- }
result.fragmentIdentifierString_ = fragmentIdentifierString_;
result.bitField0_ = to_bitField0_;
onBuilt();
@@ -11378,18 +12048,11 @@ public Builder mergeFrom(com.google.protobuf.Message other) {
public Builder mergeFrom(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto other) {
if (other == org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto.getDefaultInstance()) return this;
- if (other.hasQueryId()) {
- bitField0_ |= 0x00000001;
- queryId_ = other.queryId_;
- onChanged();
- }
- if (other.hasDagName()) {
- bitField0_ |= 0x00000002;
- dagName_ = other.dagName_;
- onChanged();
+ if (other.hasQueryIdentifier()) {
+ mergeQueryIdentifier(other.getQueryIdentifier());
}
if (other.hasFragmentIdentifierString()) {
- bitField0_ |= 0x00000004;
+ bitField0_ |= 0x00000002;
fragmentIdentifierString_ = other.fragmentIdentifierString_;
onChanged();
}
@@ -11420,164 +12083,133 @@ public Builder mergeFrom(
}
private int bitField0_;
- // optional string query_id = 1;
- private java.lang.Object queryId_ = "";
+ // optional .QueryIdentifierProto query_identifier = 1;
+ private org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto queryIdentifier_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.getDefaultInstance();
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProtoOrBuilder> queryIdentifierBuilder_;
/**
- * optional string query_id = 1;
+ * optional .QueryIdentifierProto query_identifier = 1;
*/
- public boolean hasQueryId() {
+ public boolean hasQueryIdentifier() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
- * optional string query_id = 1;
+ * optional .QueryIdentifierProto query_identifier = 1;
*/
- public java.lang.String getQueryId() {
- java.lang.Object ref = queryId_;
- if (!(ref instanceof java.lang.String)) {
- java.lang.String s = ((com.google.protobuf.ByteString) ref)
- .toStringUtf8();
- queryId_ = s;
- return s;
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto getQueryIdentifier() {
+ if (queryIdentifierBuilder_ == null) {
+ return queryIdentifier_;
} else {
- return (java.lang.String) ref;
+ return queryIdentifierBuilder_.getMessage();
}
}
/**
- * optional string query_id = 1;
+ * optional .QueryIdentifierProto query_identifier = 1;
*/
- public com.google.protobuf.ByteString
- getQueryIdBytes() {
- java.lang.Object ref = queryId_;
- if (ref instanceof String) {
- com.google.protobuf.ByteString b =
- com.google.protobuf.ByteString.copyFromUtf8(
- (java.lang.String) ref);
- queryId_ = b;
- return b;
+ public Builder setQueryIdentifier(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto value) {
+ if (queryIdentifierBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ queryIdentifier_ = value;
+ onChanged();
} else {
- return (com.google.protobuf.ByteString) ref;
+ queryIdentifierBuilder_.setMessage(value);
}
- }
- /**
- * optional string query_id = 1;
- */
- public Builder setQueryId(
- java.lang.String value) {
- if (value == null) {
- throw new NullPointerException();
- }
- bitField0_ |= 0x00000001;
- queryId_ = value;
- onChanged();
- return this;
- }
- /**
- * optional string query_id = 1;
- */
- public Builder clearQueryId() {
- bitField0_ = (bitField0_ & ~0x00000001);
- queryId_ = getDefaultInstance().getQueryId();
- onChanged();
+ bitField0_ |= 0x00000001;
return this;
}
/**
- * optional string query_id = 1;
+ * optional .QueryIdentifierProto query_identifier = 1;
*/
- public Builder setQueryIdBytes(
- com.google.protobuf.ByteString value) {
- if (value == null) {
- throw new NullPointerException();
- }
- bitField0_ |= 0x00000001;
- queryId_ = value;
- onChanged();
+ public Builder setQueryIdentifier(
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.Builder builderForValue) {
+ if (queryIdentifierBuilder_ == null) {
+ queryIdentifier_ = builderForValue.build();
+ onChanged();
+ } else {
+ queryIdentifierBuilder_.setMessage(builderForValue.build());
+ }
+ bitField0_ |= 0x00000001;
return this;
}
-
- // optional string dag_name = 2;
- private java.lang.Object dagName_ = "";
- /**
- * optional string dag_name = 2;
- */
- public boolean hasDagName() {
- return ((bitField0_ & 0x00000002) == 0x00000002);
- }
/**
- * optional string dag_name = 2;
+ * optional .QueryIdentifierProto query_identifier = 1;
*/
- public java.lang.String getDagName() {
- java.lang.Object ref = dagName_;
- if (!(ref instanceof java.lang.String)) {
- java.lang.String s = ((com.google.protobuf.ByteString) ref)
- .toStringUtf8();
- dagName_ = s;
- return s;
+ public Builder mergeQueryIdentifier(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto value) {
+ if (queryIdentifierBuilder_ == null) {
+ if (((bitField0_ & 0x00000001) == 0x00000001) &&
+ queryIdentifier_ != org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.getDefaultInstance()) {
+ queryIdentifier_ =
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.newBuilder(queryIdentifier_).mergeFrom(value).buildPartial();
+ } else {
+ queryIdentifier_ = value;
+ }
+ onChanged();
} else {
- return (java.lang.String) ref;
+ queryIdentifierBuilder_.mergeFrom(value);
}
+ bitField0_ |= 0x00000001;
+ return this;
}
/**
- * optional string dag_name = 2;
+ * optional .QueryIdentifierProto query_identifier = 1;
*/
- public com.google.protobuf.ByteString
- getDagNameBytes() {
- java.lang.Object ref = dagName_;
- if (ref instanceof String) {
- com.google.protobuf.ByteString b =
- com.google.protobuf.ByteString.copyFromUtf8(
- (java.lang.String) ref);
- dagName_ = b;
- return b;
+ public Builder clearQueryIdentifier() {
+ if (queryIdentifierBuilder_ == null) {
+ queryIdentifier_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.getDefaultInstance();
+ onChanged();
} else {
- return (com.google.protobuf.ByteString) ref;
+ queryIdentifierBuilder_.clear();
}
+ bitField0_ = (bitField0_ & ~0x00000001);
+ return this;
}
/**
- * optional string dag_name = 2;
+ * optional .QueryIdentifierProto query_identifier = 1;
*/
- public Builder setDagName(
- java.lang.String value) {
- if (value == null) {
- throw new NullPointerException();
- }
- bitField0_ |= 0x00000002;
- dagName_ = value;
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.Builder getQueryIdentifierBuilder() {
+ bitField0_ |= 0x00000001;
onChanged();
- return this;
+ return getQueryIdentifierFieldBuilder().getBuilder();
}
/**
- * optional string dag_name = 2;
+ * optional .QueryIdentifierProto query_identifier = 1;
*/
- public Builder clearDagName() {
- bitField0_ = (bitField0_ & ~0x00000002);
- dagName_ = getDefaultInstance().getDagName();
- onChanged();
- return this;
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProtoOrBuilder getQueryIdentifierOrBuilder() {
+ if (queryIdentifierBuilder_ != null) {
+ return queryIdentifierBuilder_.getMessageOrBuilder();
+ } else {
+ return queryIdentifier_;
+ }
}
/**
- * optional string dag_name = 2;
+ * optional .QueryIdentifierProto query_identifier = 1;
*/
- public Builder setDagNameBytes(
- com.google.protobuf.ByteString value) {
- if (value == null) {
- throw new NullPointerException();
- }
- bitField0_ |= 0x00000002;
- dagName_ = value;
- onChanged();
- return this;
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProtoOrBuilder>
+ getQueryIdentifierFieldBuilder() {
+ if (queryIdentifierBuilder_ == null) {
+ queryIdentifierBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProtoOrBuilder>(
+ queryIdentifier_,
+ getParentForChildren(),
+ isClean());
+ queryIdentifier_ = null;
+ }
+ return queryIdentifierBuilder_;
}
- // optional string fragment_identifier_string = 7;
+ // optional string fragment_identifier_string = 2;
private java.lang.Object fragmentIdentifierString_ = "";
/**
- * optional string fragment_identifier_string = 7;
+ * optional string fragment_identifier_string = 2;
*/
public boolean hasFragmentIdentifierString() {
- return ((bitField0_ & 0x00000004) == 0x00000004);
+ return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
- * optional string fragment_identifier_string = 7;
+ * optional string fragment_identifier_string = 2;
*/
public java.lang.String getFragmentIdentifierString() {
java.lang.Object ref = fragmentIdentifierString_;
@@ -11591,7 +12223,7 @@ public boolean hasFragmentIdentifierString() {
}
}
/**
- * optional string fragment_identifier_string = 7;
+ * optional string fragment_identifier_string = 2;
*/
public com.google.protobuf.ByteString
getFragmentIdentifierStringBytes() {
@@ -11607,36 +12239,36 @@ public boolean hasFragmentIdentifierString() {
}
}
/**
- * optional string fragment_identifier_string = 7;
+ * optional string fragment_identifier_string = 2;
*/
public Builder setFragmentIdentifierString(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
- bitField0_ |= 0x00000004;
+ bitField0_ |= 0x00000002;
fragmentIdentifierString_ = value;
onChanged();
return this;
}
/**
- * optional string fragment_identifier_string = 7;
+ * optional string fragment_identifier_string = 2;
*/
public Builder clearFragmentIdentifierString() {
- bitField0_ = (bitField0_ & ~0x00000004);
+ bitField0_ = (bitField0_ & ~0x00000002);
fragmentIdentifierString_ = getDefaultInstance().getFragmentIdentifierString();
onChanged();
return this;
}
/**
- * optional string fragment_identifier_string = 7;
+ * optional string fragment_identifier_string = 2;
*/
public Builder setFragmentIdentifierStringBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
- bitField0_ |= 0x00000004;
+ bitField0_ |= 0x00000002;
fragmentIdentifierString_ = value;
onChanged();
return this;
@@ -13474,6 +14106,11 @@ private BlockingStub(com.google.protobuf.BlockingRpcChannel channel) {
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_FragmentRuntimeInfo_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_QueryIdentifierProto_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_QueryIdentifierProto_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
internal_static_SubmitWorkRequestProto_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
@@ -13532,66 +14169,71 @@ private BlockingStub(com.google.protobuf.BlockingRpcChannel channel) {
descriptor;
static {
java.lang.String[] descriptorData = {
- "\n\030LlapDaemonProtocol.proto\"9\n\020UserPayloa" +
- "dProto\022\024\n\014user_payload\030\001 \001(\014\022\017\n\007version\030" +
- "\002 \001(\005\"j\n\025EntityDescriptorProto\022\022\n\nclass_" +
- "name\030\001 \001(\t\022\'\n\014user_payload\030\002 \001(\0132\021.UserP" +
- "ayloadProto\022\024\n\014history_text\030\003 \001(\014\"x\n\013IOS" +
- "pecProto\022\035\n\025connected_vertex_name\030\001 \001(\t\022" +
- "-\n\rio_descriptor\030\002 \001(\0132\026.EntityDescripto" +
- "rProto\022\033\n\023physical_edge_count\030\003 \001(\005\"z\n\023G" +
- "roupInputSpecProto\022\022\n\ngroup_name\030\001 \001(\t\022\026" +
- "\n\016group_vertices\030\002 \003(\t\0227\n\027merged_input_d",
- "escriptor\030\003 \001(\0132\026.EntityDescriptorProto\"" +
- "\333\002\n\021FragmentSpecProto\022\"\n\032fragment_identi" +
- "fier_string\030\001 \001(\t\022\020\n\010dag_name\030\002 \001(\t\022\023\n\013v" +
- "ertex_name\030\003 \001(\t\0224\n\024processor_descriptor" +
- "\030\004 \001(\0132\026.EntityDescriptorProto\022!\n\013input_" +
- "specs\030\005 \003(\0132\014.IOSpecProto\022\"\n\014output_spec" +
- "s\030\006 \003(\0132\014.IOSpecProto\0221\n\023grouped_input_s" +
- "pecs\030\007 \003(\0132\024.GroupInputSpecProto\022\032\n\022vert" +
- "ex_parallelism\030\010 \001(\005\022\027\n\017fragment_number\030" +
- "\t \001(\005\022\026\n\016attempt_number\030\n \001(\005\"\344\001\n\023Fragme",
- "ntRuntimeInfo\022#\n\033num_self_and_upstream_t" +
- "asks\030\001 \001(\005\022-\n%num_self_and_upstream_comp" +
- "leted_tasks\030\002 \001(\005\022\033\n\023within_dag_priority" +
- "\030\003 \001(\005\022\026\n\016dag_start_time\030\004 \001(\003\022 \n\030first_" +
- "attempt_start_time\030\005 \001(\003\022\"\n\032current_atte" +
- "mpt_start_time\030\006 \001(\003\"\266\002\n\026SubmitWorkReque" +
- "stProto\022\033\n\023container_id_string\030\001 \001(\t\022\017\n\007" +
- "am_host\030\002 \001(\t\022\017\n\007am_port\030\003 \001(\005\022\030\n\020token_" +
- "identifier\030\004 \001(\t\022\032\n\022credentials_binary\030\005" +
- " \001(\014\022\014\n\004user\030\006 \001(\t\022\035\n\025application_id_str",
- "ing\030\007 \001(\t\022\032\n\022app_attempt_number\030\010 \001(\005\022)\n" +
- "\rfragment_spec\030\t \001(\0132\022.FragmentSpecProto" +
- "\0223\n\025fragment_runtime_info\030\n \001(\0132\024.Fragme" +
- "ntRuntimeInfo\"\031\n\027SubmitWorkResponseProto" +
- "\"f\n\036SourceStateUpdatedRequestProto\022\020\n\010da" +
- "g_name\030\001 \001(\t\022\020\n\010src_name\030\002 \001(\t\022 \n\005state\030" +
- "\003 \001(\0162\021.SourceStateProto\"!\n\037SourceStateU" +
- "pdatedResponseProto\"X\n\031QueryCompleteRequ" +
- "estProto\022\020\n\010query_id\030\001 \001(\t\022\020\n\010dag_name\030\002" +
- " \001(\t\022\027\n\014delete_delay\030\003 \001(\003:\0010\"\034\n\032QueryCo",
- "mpleteResponseProto\"g\n\035TerminateFragment" +
- "RequestProto\022\020\n\010query_id\030\001 \001(\t\022\020\n\010dag_na" +
- "me\030\002 \001(\t\022\"\n\032fragment_identifier_string\030\007" +
- " \001(\t\" \n\036TerminateFragmentResponseProto\"\026" +
- "\n\024GetTokenRequestProto\"&\n\025GetTokenRespon" +
- "seProto\022\r\n\005token\030\001 \001(\014*2\n\020SourceStatePro" +
- "to\022\017\n\013S_SUCCEEDED\020\001\022\r\n\tS_RUNNING\020\0022\316\002\n\022L" +
- "lapDaemonProtocol\022?\n\nsubmitWork\022\027.Submit" +
- "WorkRequestProto\032\030.SubmitWorkResponsePro" +
- "to\022W\n\022sourceStateUpdated\022\037.SourceStateUp",
- "datedRequestProto\032 .SourceStateUpdatedRe" +
- "sponseProto\022H\n\rqueryComplete\022\032.QueryComp" +
- "leteRequestProto\032\033.QueryCompleteResponse" +
- "Proto\022T\n\021terminateFragment\022\036.TerminateFr" +
- "agmentRequestProto\032\037.TerminateFragmentRe" +
- "sponseProto2]\n\026LlapManagementProtocol\022C\n" +
- "\022getDelegationToken\022\025.GetTokenRequestPro" +
- "to\032\026.GetTokenResponseProtoBH\n&org.apache" +
- ".hadoop.hive.llap.daemon.rpcB\030LlapDaemon" +
- "ProtocolProtos\210\001\001\240\001\001"
+ "\n1llap-server/src/protobuf/LlapDaemonPro" +
+ "tocol.proto\"9\n\020UserPayloadProto\022\024\n\014user_" +
+ "payload\030\001 \001(\014\022\017\n\007version\030\002 \001(\005\"j\n\025Entity" +
+ "DescriptorProto\022\022\n\nclass_name\030\001 \001(\t\022\'\n\014u" +
+ "ser_payload\030\002 \001(\0132\021.UserPayloadProto\022\024\n\014" +
+ "history_text\030\003 \001(\014\"x\n\013IOSpecProto\022\035\n\025con" +
+ "nected_vertex_name\030\001 \001(\t\022-\n\rio_descripto" +
+ "r\030\002 \001(\0132\026.EntityDescriptorProto\022\033\n\023physi" +
+ "cal_edge_count\030\003 \001(\005\"z\n\023GroupInputSpecPr" +
+ "oto\022\022\n\ngroup_name\030\001 \001(\t\022\026\n\016group_vertice",
+ "s\030\002 \003(\t\0227\n\027merged_input_descriptor\030\003 \001(\013" +
+ "2\026.EntityDescriptorProto\"\353\002\n\021FragmentSpe" +
+ "cProto\022\"\n\032fragment_identifier_string\030\001 \001" +
+ "(\t\022\020\n\010dag_name\030\002 \001(\t\022\016\n\006dag_id\030\013 \001(\005\022\023\n\013" +
+ "vertex_name\030\003 \001(\t\0224\n\024processor_descripto" +
+ "r\030\004 \001(\0132\026.EntityDescriptorProto\022!\n\013input" +
+ "_specs\030\005 \003(\0132\014.IOSpecProto\022\"\n\014output_spe" +
+ "cs\030\006 \003(\0132\014.IOSpecProto\0221\n\023grouped_input_" +
+ "specs\030\007 \003(\0132\024.GroupInputSpecProto\022\032\n\022ver" +
+ "tex_parallelism\030\010 \001(\005\022\027\n\017fragment_number",
+ "\030\t \001(\005\022\026\n\016attempt_number\030\n \001(\005\"\344\001\n\023Fragm" +
+ "entRuntimeInfo\022#\n\033num_self_and_upstream_" +
+ "tasks\030\001 \001(\005\022-\n%num_self_and_upstream_com" +
+ "pleted_tasks\030\002 \001(\005\022\033\n\023within_dag_priorit" +
+ "y\030\003 \001(\005\022\026\n\016dag_start_time\030\004 \001(\003\022 \n\030first" +
+ "_attempt_start_time\030\005 \001(\003\022\"\n\032current_att" +
+ "empt_start_time\030\006 \001(\003\"F\n\024QueryIdentifier" +
+ "Proto\022\026\n\016app_identifier\030\001 \001(\t\022\026\n\016dag_ide" +
+ "ntifier\030\002 \001(\005\"\266\002\n\026SubmitWorkRequestProto" +
+ "\022\033\n\023container_id_string\030\001 \001(\t\022\017\n\007am_host",
+ "\030\002 \001(\t\022\017\n\007am_port\030\003 \001(\005\022\030\n\020token_identif" +
+ "ier\030\004 \001(\t\022\032\n\022credentials_binary\030\005 \001(\014\022\014\n" +
+ "\004user\030\006 \001(\t\022\035\n\025application_id_string\030\007 \001" +
+ "(\t\022\032\n\022app_attempt_number\030\010 \001(\005\022)\n\rfragme" +
+ "nt_spec\030\t \001(\0132\022.FragmentSpecProto\0223\n\025fra" +
+ "gment_runtime_info\030\n \001(\0132\024.FragmentRunti" +
+ "meInfo\"\031\n\027SubmitWorkResponseProto\"\205\001\n\036So" +
+ "urceStateUpdatedRequestProto\022/\n\020query_id" +
+ "entifier\030\001 \001(\0132\025.QueryIdentifierProto\022\020\n" +
+ "\010src_name\030\002 \001(\t\022 \n\005state\030\003 \001(\0162\021.SourceS",
+ "tateProto\"!\n\037SourceStateUpdatedResponseP" +
+ "roto\"w\n\031QueryCompleteRequestProto\022\020\n\010que" +
+ "ry_id\030\001 \001(\t\022/\n\020query_identifier\030\002 \001(\0132\025." +
+ "QueryIdentifierProto\022\027\n\014delete_delay\030\004 \001" +
+ "(\003:\0010\"\034\n\032QueryCompleteResponseProto\"t\n\035T" +
+ "erminateFragmentRequestProto\022/\n\020query_id" +
+ "entifier\030\001 \001(\0132\025.QueryIdentifierProto\022\"\n" +
+ "\032fragment_identifier_string\030\002 \001(\t\" \n\036Ter" +
+ "minateFragmentResponseProto\"\026\n\024GetTokenR" +
+ "equestProto\"&\n\025GetTokenResponseProto\022\r\n\005",
+ "token\030\001 \001(\014*2\n\020SourceStateProto\022\017\n\013S_SUC" +
+ "CEEDED\020\001\022\r\n\tS_RUNNING\020\0022\316\002\n\022LlapDaemonPr" +
+ "otocol\022?\n\nsubmitWork\022\027.SubmitWorkRequest" +
+ "Proto\032\030.SubmitWorkResponseProto\022W\n\022sourc" +
+ "eStateUpdated\022\037.SourceStateUpdatedReques" +
+ "tProto\032 .SourceStateUpdatedResponseProto" +
+ "\022H\n\rqueryComplete\022\032.QueryCompleteRequest" +
+ "Proto\032\033.QueryCompleteResponseProto\022T\n\021te" +
+ "rminateFragment\022\036.TerminateFragmentReque" +
+ "stProto\032\037.TerminateFragmentResponseProto",
+ "2]\n\026LlapManagementProtocol\022C\n\022getDelegat" +
+ "ionToken\022\025.GetTokenRequestProto\032\026.GetTok" +
+ "enResponseProtoBH\n&org.apache.hadoop.hiv" +
+ "e.llap.daemon.rpcB\030LlapDaemonProtocolPro" +
+ "tos\210\001\001\240\001\001"
};
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -13627,69 +14269,75 @@ private BlockingStub(com.google.protobuf.BlockingRpcChannel channel) {
internal_static_FragmentSpecProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_FragmentSpecProto_descriptor,
- new java.lang.String[] { "FragmentIdentifierString", "DagName", "VertexName", "ProcessorDescriptor", "InputSpecs", "OutputSpecs", "GroupedInputSpecs", "VertexParallelism", "FragmentNumber", "AttemptNumber", });
+ new java.lang.String[] { "FragmentIdentifierString", "DagName", "DagId", "VertexName", "ProcessorDescriptor", "InputSpecs", "OutputSpecs", "GroupedInputSpecs", "VertexParallelism", "FragmentNumber", "AttemptNumber", });
internal_static_FragmentRuntimeInfo_descriptor =
getDescriptor().getMessageTypes().get(5);
internal_static_FragmentRuntimeInfo_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_FragmentRuntimeInfo_descriptor,
new java.lang.String[] { "NumSelfAndUpstreamTasks", "NumSelfAndUpstreamCompletedTasks", "WithinDagPriority", "DagStartTime", "FirstAttemptStartTime", "CurrentAttemptStartTime", });
- internal_static_SubmitWorkRequestProto_descriptor =
+ internal_static_QueryIdentifierProto_descriptor =
getDescriptor().getMessageTypes().get(6);
+ internal_static_QueryIdentifierProto_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_QueryIdentifierProto_descriptor,
+ new java.lang.String[] { "AppIdentifier", "DagIdentifier", });
+ internal_static_SubmitWorkRequestProto_descriptor =
+ getDescriptor().getMessageTypes().get(7);
internal_static_SubmitWorkRequestProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_SubmitWorkRequestProto_descriptor,
new java.lang.String[] { "ContainerIdString", "AmHost", "AmPort", "TokenIdentifier", "CredentialsBinary", "User", "ApplicationIdString", "AppAttemptNumber", "FragmentSpec", "FragmentRuntimeInfo", });
internal_static_SubmitWorkResponseProto_descriptor =
- getDescriptor().getMessageTypes().get(7);
+ getDescriptor().getMessageTypes().get(8);
internal_static_SubmitWorkResponseProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_SubmitWorkResponseProto_descriptor,
new java.lang.String[] { });
internal_static_SourceStateUpdatedRequestProto_descriptor =
- getDescriptor().getMessageTypes().get(8);
+ getDescriptor().getMessageTypes().get(9);
internal_static_SourceStateUpdatedRequestProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_SourceStateUpdatedRequestProto_descriptor,
- new java.lang.String[] { "DagName", "SrcName", "State", });
+ new java.lang.String[] { "QueryIdentifier", "SrcName", "State", });
internal_static_SourceStateUpdatedResponseProto_descriptor =
- getDescriptor().getMessageTypes().get(9);
+ getDescriptor().getMessageTypes().get(10);
internal_static_SourceStateUpdatedResponseProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_SourceStateUpdatedResponseProto_descriptor,
new java.lang.String[] { });
internal_static_QueryCompleteRequestProto_descriptor =
- getDescriptor().getMessageTypes().get(10);
+ getDescriptor().getMessageTypes().get(11);
internal_static_QueryCompleteRequestProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_QueryCompleteRequestProto_descriptor,
- new java.lang.String[] { "QueryId", "DagName", "DeleteDelay", });
+ new java.lang.String[] { "QueryId", "QueryIdentifier", "DeleteDelay", });
internal_static_QueryCompleteResponseProto_descriptor =
- getDescriptor().getMessageTypes().get(11);
+ getDescriptor().getMessageTypes().get(12);
internal_static_QueryCompleteResponseProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_QueryCompleteResponseProto_descriptor,
new java.lang.String[] { });
internal_static_TerminateFragmentRequestProto_descriptor =
- getDescriptor().getMessageTypes().get(12);
+ getDescriptor().getMessageTypes().get(13);
internal_static_TerminateFragmentRequestProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_TerminateFragmentRequestProto_descriptor,
- new java.lang.String[] { "QueryId", "DagName", "FragmentIdentifierString", });
+ new java.lang.String[] { "QueryIdentifier", "FragmentIdentifierString", });
internal_static_TerminateFragmentResponseProto_descriptor =
- getDescriptor().getMessageTypes().get(13);
+ getDescriptor().getMessageTypes().get(14);
internal_static_TerminateFragmentResponseProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_TerminateFragmentResponseProto_descriptor,
new java.lang.String[] { });
internal_static_GetTokenRequestProto_descriptor =
- getDescriptor().getMessageTypes().get(14);
+ getDescriptor().getMessageTypes().get(15);
internal_static_GetTokenRequestProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_GetTokenRequestProto_descriptor,
new java.lang.String[] { });
internal_static_GetTokenResponseProto_descriptor =
- getDescriptor().getMessageTypes().get(15);
+ getDescriptor().getMessageTypes().get(16);
internal_static_GetTokenResponseProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_GetTokenResponseProto_descriptor,
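
Note: the accessor-table changes above register the new QueryIdentifierProto message at index 6 and shift every later message down by one; the field-name arrays likewise gain "DagId" and "QueryIdentifier" entries. A minimal sketch of the regenerated API from the caller's side (identifier values are hypothetical, not taken from this patch):

    LlapDaemonProtocolProtos.FragmentSpecProto spec =
        LlapDaemonProtocolProtos.FragmentSpecProto.newBuilder()
            .setDagName("some-dag") // hypothetical dag name
            .setDagId(3)            // the newly added optional field
            .build();
    assert spec.hasDagId() && spec.getDagId() == 3;
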
diff --git llap-server/src/java/org/apache/hadoop/hive/llap/daemon/HistoryLogger.java llap-server/src/java/org/apache/hadoop/hive/llap/daemon/HistoryLogger.java
index 3c9ad24..f1fc285 100644
--- llap-server/src/java/org/apache/hadoop/hive/llap/daemon/HistoryLogger.java
+++ llap-server/src/java/org/apache/hadoop/hive/llap/daemon/HistoryLogger.java
@@ -27,6 +27,7 @@
private static final String HISTORY_START_TIME = "StartTime";
private static final String HISTORY_END_TIME = "EndTime";
private static final String HISTORY_DAG_NAME = "DagName";
+ private static final String HISTORY_DAG_ID = "DagId";
private static final String HISTORY_VERTEX_NAME = "VertexName";
private static final String HISTORY_TASK_ID = "TaskId";
private static final String HISTORY_ATTEMPT_ID = "TaskAttemptId";
@@ -41,29 +42,30 @@
public static void logFragmentStart(String applicationIdStr, String containerIdStr,
String hostname,
- String dagName, String vertexName, int taskId,
+ String dagName, int dagIdentifier, String vertexName, int taskId,
int attemptId) {
HISTORY_LOGGER.info(
- constructFragmentStartString(applicationIdStr, containerIdStr, hostname, dagName,
+ constructFragmentStartString(applicationIdStr, containerIdStr, hostname, dagName, dagIdentifier,
vertexName, taskId, attemptId));
}
public static void logFragmentEnd(String applicationIdStr, String containerIdStr, String hostname,
- String dagName, String vertexName, int taskId, int attemptId,
+ String dagName, int dagIdentifier, String vertexName, int taskId, int attemptId,
String threadName, long startTime, boolean failed) {
HISTORY_LOGGER.info(constructFragmentEndString(applicationIdStr, containerIdStr, hostname,
- dagName, vertexName, taskId, attemptId, threadName, startTime, failed));
+ dagName, dagIdentifier, vertexName, taskId, attemptId, threadName, startTime, failed));
}
private static String constructFragmentStartString(String applicationIdStr, String containerIdStr,
- String hostname, String dagName,
+ String hostname, String dagName, int dagIdentifier,
String vertexName, int taskId, int attemptId) {
HistoryLineBuilder lb = new HistoryLineBuilder(EVENT_TYPE_FRAGMENT_START);
lb.addHostName(hostname);
lb.addAppid(applicationIdStr);
lb.addContainerId(containerIdStr);
lb.addDagName(dagName);
+ lb.addDagId(dagIdentifier);
lb.addVertexName(vertexName);
lb.addTaskId(taskId);
lb.addTaskAttemptId(attemptId);
@@ -72,7 +74,7 @@ private static String constructFragmentStartString(String applicationIdStr, Stri
}
private static String constructFragmentEndString(String applicationIdStr, String containerIdStr,
- String hostname, String dagName,
+ String hostname, String dagName, int dagIdentifier,
String vertexName, int taskId, int attemptId,
String threadName, long startTime, boolean succeeded) {
HistoryLineBuilder lb = new HistoryLineBuilder(EVENT_TYPE_FRAGMENT_END);
@@ -80,6 +82,7 @@ private static String constructFragmentEndString(String applicationIdStr, String
lb.addAppid(applicationIdStr);
lb.addContainerId(containerIdStr);
lb.addDagName(dagName);
+ lb.addDagId(dagIdentifier);
lb.addVertexName(vertexName);
lb.addTaskId(taskId);
lb.addTaskAttemptId(attemptId);
@@ -113,6 +116,10 @@ HistoryLineBuilder addDagName(String dagName) {
return setKeyValue(HISTORY_DAG_NAME, dagName);
}
+ HistoryLineBuilder addDagId(int dagId) {
+ return setKeyValue(HISTORY_DAG_ID, String.valueOf(dagId));
+ }
+
HistoryLineBuilder addVertexName(String vertexName) {
return setKeyValue(HISTORY_VERTEX_NAME, vertexName);
}
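
With dagIdentifier threaded through both logging paths, each fragment start/end history line now carries a DagId entry next to DagName, disambiguating DAGs whose names repeat across queries. An illustrative call against the widened signature (all argument values are hypothetical):

    HistoryLogger.logFragmentStart("application_1452000000000_0001",
        "container_1452000000000_0001_01_000002", "node-1",
        "some-dag", /* dagIdentifier */ 3, "Map 1", /* taskId */ 0, /* attemptId */ 0);
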
diff --git llap-server/src/java/org/apache/hadoop/hive/llap/daemon/KilledTaskHandler.java llap-server/src/java/org/apache/hadoop/hive/llap/daemon/KilledTaskHandler.java
index 7cb433b..e2caec2 100644
--- llap-server/src/java/org/apache/hadoop/hive/llap/daemon/KilledTaskHandler.java
+++ llap-server/src/java/org/apache/hadoop/hive/llap/daemon/KilledTaskHandler.java
@@ -14,6 +14,7 @@
package org.apache.hadoop.hive.llap.daemon;
+import org.apache.hadoop.hive.llap.daemon.impl.QueryIdentifier;
import org.apache.hadoop.security.token.Token;
import org.apache.tez.common.security.JobTokenIdentifier;
import org.apache.tez.dag.records.TezTaskAttemptID;
@@ -24,6 +25,6 @@
// inferred from this.
// Passing in parameters until there's some dag information stored and tracked in the daemon.
void taskKilled(String amLocation, int port, String user,
- Token<JobTokenIdentifier> jobToken, String queryId, String dagName,
+ Token<JobTokenIdentifier> jobToken, QueryIdentifier queryIdentifier,
TezTaskAttemptID taskAttemptId);
}
diff --git llap-server/src/java/org/apache/hadoop/hive/llap/daemon/QueryFailedHandler.java llap-server/src/java/org/apache/hadoop/hive/llap/daemon/QueryFailedHandler.java
index 4e62a68..7f9553d 100644
--- llap-server/src/java/org/apache/hadoop/hive/llap/daemon/QueryFailedHandler.java
+++ llap-server/src/java/org/apache/hadoop/hive/llap/daemon/QueryFailedHandler.java
@@ -14,7 +14,9 @@
package org.apache.hadoop.hive.llap.daemon;
+import org.apache.hadoop.hive.llap.daemon.impl.QueryIdentifier;
+
public interface QueryFailedHandler {
- public void queryFailed(String queryId, String dagName);
+ public void queryFailed(QueryIdentifier queryIdentifier);
}
diff --git llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/AMReporter.java llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/AMReporter.java
index f6711d8..d1ec715 100644
--- llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/AMReporter.java
+++ llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/AMReporter.java
@@ -172,9 +172,9 @@ public void serviceStop() {
}
public void registerTask(String amLocation, int port, String user,
- Token<JobTokenIdentifier> jobToken, String queryId, String dagName) {
+ Token<JobTokenIdentifier> jobToken, QueryIdentifier queryIdentifier) {
if (LOG.isTraceEnabled()) {
- LOG.trace("Registering for heartbeat: " + amLocation + ":" + port + " for dagName=" + dagName);
+ LOG.trace("Registering for heartbeat: " + amLocation + ":" + port + " for queryIdentifier=" + queryIdentifier);
}
AMNodeInfo amNodeInfo;
synchronized (knownAppMasters) {
@@ -182,7 +182,7 @@ public void registerTask(String amLocation, int port, String user,
amNodeInfo = knownAppMasters.get(amNodeId);
if (amNodeInfo == null) {
amNodeInfo =
- new AMNodeInfo(amNodeId, user, jobToken, dagName, retryPolicy, retryTimeout, socketFactory,
+ new AMNodeInfo(amNodeId, user, jobToken, queryIdentifier, retryPolicy, retryTimeout, socketFactory,
conf);
knownAppMasters.put(amNodeId, amNodeInfo);
// Add to the queue only the first time this is registered, and on
@@ -190,7 +190,7 @@ public void registerTask(String amLocation, int port, String user,
amNodeInfo.setNextHeartbeatTime(System.currentTimeMillis() + heartbeatInterval);
pendingHeartbeatQueeu.add(amNodeInfo);
}
- amNodeInfo.setCurrentDagName(dagName);
+ amNodeInfo.setCurrentQueryIdentifier(queryIdentifier);
amNodeInfo.incrementAndGetTaskCount();
}
}
@@ -214,12 +214,12 @@ public void unregisterTask(String amLocation, int port) {
}
public void taskKilled(String amLocation, int port, String user, Token<JobTokenIdentifier> jobToken,
- final String queryId, final String dagName, final TezTaskAttemptID taskAttemptId) {
+ final QueryIdentifier queryIdentifier, final TezTaskAttemptID taskAttemptId) {
// Not re-using the connection for the AM heartbeat - which may or may not be open by this point.
// knownAppMasters is used for sending heartbeats for queued tasks. Killed messages use a new connection.
LlapNodeId amNodeId = LlapNodeId.getInstance(amLocation, port);
AMNodeInfo amNodeInfo =
- new AMNodeInfo(amNodeId, user, jobToken, dagName, retryPolicy, retryTimeout, socketFactory,
+ new AMNodeInfo(amNodeId, user, jobToken, queryIdentifier, retryPolicy, retryTimeout, socketFactory,
conf);
// Even if the service hasn't started up. It's OK to make this invocation since this will
@@ -251,8 +251,8 @@ protected Void callInternal() {
synchronized (knownAppMasters) {
if (LOG.isDebugEnabled()) {
LOG.debug(
- "Removing am {} with last associated dag{} from heartbeat with taskCount={}, amFailed={}",
- amNodeInfo.amNodeId, amNodeInfo.getCurrentDagName(), amNodeInfo.getTaskCount(),
+ "Removing am {} with last associated dag {} from heartbeat with taskCount={}, amFailed={}",
+ amNodeInfo.amNodeId, amNodeInfo.getCurrentQueryIdentifier(), amNodeInfo.getTaskCount(),
amNodeInfo.hasAmFailed(), amNodeInfo);
}
knownAppMasters.remove(amNodeInfo.amNodeId);
@@ -272,11 +272,11 @@ public void onSuccess(Void result) {
@Override
public void onFailure(Throwable t) {
- String currentDagName = amNodeInfo.getCurrentDagName();
+ QueryIdentifier currentQueryIdentifier = amNodeInfo.getCurrentQueryIdentifier();
amNodeInfo.setAmFailed(true);
LOG.warn("Heartbeat failed to AM {}. Killing all other tasks for the query={}",
- amNodeInfo.amNodeId, currentDagName, t);
- queryFailedHandler.queryFailed(null, currentDagName);
+ amNodeInfo.amNodeId, currentQueryIdentifier, t);
+ queryFailedHandler.queryFailed(currentQueryIdentifier);
}
});
}
@@ -339,11 +339,11 @@ protected Void callInternal() {
amNodeInfo.getUmbilical().nodeHeartbeat(new Text(nodeId.getHostname()),
nodeId.getPort());
} catch (IOException e) {
- String currentDagName = amNodeInfo.getCurrentDagName();
+ QueryIdentifier currentQueryIdentifier = amNodeInfo.getCurrentQueryIdentifier();
amNodeInfo.setAmFailed(true);
LOG.warn("Failed to communicated with AM at {}. Killing remaining fragments for query {}",
- amNodeInfo.amNodeId, currentDagName, e);
- queryFailedHandler.queryFailed(null, currentDagName);
+ amNodeInfo.amNodeId, currentQueryIdentifier, e);
+ queryFailedHandler.queryFailed(currentQueryIdentifier);
} catch (InterruptedException e) {
if (!isShutdown.get()) {
LOG.warn("Interrupted while trying to send heartbeat to AM {}", amNodeInfo.amNodeId, e);
@@ -370,21 +370,21 @@ protected Void callInternal() {
private final long timeout;
private final SocketFactory socketFactory;
private final AtomicBoolean amFailed = new AtomicBoolean(false);
- private String currentDagName;
+ private QueryIdentifier currentQueryIdentifier;
private LlapTaskUmbilicalProtocol umbilical;
private long nextHeartbeatTime;
public AMNodeInfo(LlapNodeId amNodeId, String user,
Token<JobTokenIdentifier> jobToken,
- String currentDagName,
+ QueryIdentifier currentQueryIdentifier,
RetryPolicy retryPolicy,
long timeout,
SocketFactory socketFactory,
Configuration conf) {
this.user = user;
this.jobToken = jobToken;
- this.currentDagName = currentDagName;
+ this.currentQueryIdentifier = currentQueryIdentifier;
this.retryPolicy = retryPolicy;
this.timeout = timeout;
this.socketFactory = socketFactory;
@@ -439,12 +439,12 @@ int getTaskCount() {
return taskCount.get();
}
- public synchronized String getCurrentDagName() {
- return currentDagName;
+ public synchronized QueryIdentifier getCurrentQueryIdentifier() {
+ return currentQueryIdentifier;
}
- public synchronized void setCurrentDagName(String currentDagName) {
- this.currentDagName = currentDagName;
+ public synchronized void setCurrentQueryIdentifier(QueryIdentifier queryIdentifier) {
+ this.currentQueryIdentifier = queryIdentifier;
}
synchronized void setNextHeartbeatTime(long nextTime) {
diff --git llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/ContainerRunnerImpl.java llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/ContainerRunnerImpl.java
index 2139bb0..d6b2234 100644
--- llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/ContainerRunnerImpl.java
+++ llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/ContainerRunnerImpl.java
@@ -28,7 +28,6 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
-import org.apache.hadoop.hive.llap.configuration.LlapConfiguration;
import org.apache.hadoop.hive.llap.daemon.ContainerRunner;
import org.apache.hadoop.hive.llap.daemon.FragmentCompletionHandler;
import org.apache.hadoop.hive.llap.daemon.HistoryLogger;
@@ -147,7 +146,7 @@ protected void serviceStop() throws Exception {
@Override
public void submitWork(SubmitWorkRequestProto request) throws IOException {
HistoryLogger.logFragmentStart(request.getApplicationIdString(), request.getContainerIdString(),
- localAddress.get().getHostName(), request.getFragmentSpec().getDagName(),
+ localAddress.get().getHostName(), request.getFragmentSpec().getDagName(), request.getFragmentSpec().getDagId(),
request.getFragmentSpec().getVertexName(), request.getFragmentSpec().getFragmentNumber(),
request.getFragmentSpec().getAttemptNumber());
if (LOG.isInfoEnabled()) {
@@ -168,8 +167,10 @@ public void submitWork(SubmitWorkRequestProto request) throws IOException {
fragmentSpec.getFragmentIdentifierString());
int dagIdentifier = taskAttemptId.getTaskID().getVertexID().getDAGId().getId();
+ QueryIdentifier queryIdentifier = new QueryIdentifier(request.getApplicationIdString(), dagIdentifier);
+
QueryFragmentInfo fragmentInfo = queryTracker
- .registerFragment(null, request.getApplicationIdString(), fragmentSpec.getDagName(),
+ .registerFragment(queryIdentifier, request.getApplicationIdString(), fragmentSpec.getDagName(),
dagIdentifier,
fragmentSpec.getVertexName(), fragmentSpec.getFragmentNumber(),
fragmentSpec.getAttemptNumber(), request.getUser(), request.getFragmentSpec());
@@ -224,7 +225,9 @@ public LlapExecutionContext(String hostname, QueryTracker queryTracker) {
@Override
public void initializeHook(TezProcessor source) {
- queryTracker.registerDagQueryId(source.getContext().getDAGName(),
+ queryTracker.registerDagQueryId(
+ new QueryIdentifier(source.getContext().getApplicationId().toString(),
+ source.getContext().getDagIdentifier()),
HiveConf.getVar(source.getConf(), HiveConf.ConfVars.HIVEQUERYID));
}
}
@@ -232,19 +235,25 @@ public void initializeHook(TezProcessor source) {
@Override
public void sourceStateUpdated(SourceStateUpdatedRequestProto request) {
LOG.info("Processing state update: " + stringifySourceStateUpdateRequest(request));
- queryTracker.registerSourceStateChange(request.getDagName(), request.getSrcName(),
+ queryTracker.registerSourceStateChange(
+ new QueryIdentifier(request.getQueryIdentifier().getAppIdentifier(),
+ request.getQueryIdentifier().getDagIdentifier()), request.getSrcName(),
request.getState());
}
@Override
public void queryComplete(QueryCompleteRequestProto request) {
- LOG.info("Processing queryComplete notification for {}", request.getDagName());
+ QueryIdentifier queryIdentifier =
+ new QueryIdentifier(request.getQueryIdentifier().getAppIdentifier(),
+ request.getQueryIdentifier().getDagIdentifier());
+ LOG.info("Processing queryComplete notification for {}", queryIdentifier);
List<QueryFragmentInfo> knownFragments =
- queryTracker.queryComplete(null, request.getDagName(), request.getDeleteDelay());
- LOG.info("DBG: Pending fragment count for completed query {} = {}", request.getDagName(),
+ queryTracker
+ .queryComplete(queryIdentifier, request.getDeleteDelay());
+ LOG.info("DBG: Pending fragment count for completed query {} = {}", queryIdentifier,
knownFragments.size());
for (QueryFragmentInfo fragmentInfo : knownFragments) {
- LOG.info("DBG: Issuing killFragment for completed query {} {}", request.getDagName(),
+ LOG.info("DBG: Issuing killFragment for completed query {} {}", queryIdentifier,
fragmentInfo.getFragmentIdentifierString());
executorService.killFragment(fragmentInfo.getFragmentIdentifierString());
}
@@ -258,7 +267,9 @@ public void terminateFragment(TerminateFragmentRequestProto request) {
private String stringifySourceStateUpdateRequest(SourceStateUpdatedRequestProto request) {
StringBuilder sb = new StringBuilder();
- sb.append("dagName=").append(request.getDagName())
+ QueryIdentifier queryIdentifier = new QueryIdentifier(request.getQueryIdentifier().getAppIdentifier(),
+ request.getQueryIdentifier().getDagIdentifier());
+ sb.append("queryIdentifier=").append(queryIdentifier)
.append(", ").append("sourceName=").append(request.getSrcName())
.append(", ").append("state=").append(request.getState());
return sb.toString();
@@ -324,14 +335,14 @@ public void fragmentComplete(QueryFragmentInfo fragmentInfo) {
}
@Override
- public void queryFailed(String queryId, String dagName) {
- LOG.info("Processing query failed notification for {}", dagName);
+ public void queryFailed(QueryIdentifier queryIdentifier) {
+ LOG.info("Processing query failed notification for {}", queryIdentifier);
List<QueryFragmentInfo> knownFragments =
- queryTracker.queryComplete(queryId, dagName, -1);
- LOG.info("DBG: Pending fragment count for failed query {} = {}", dagName,
+ queryTracker.queryComplete(queryIdentifier, -1);
+ LOG.info("DBG: Pending fragment count for failed query {} = {}", queryIdentifier,
knownFragments.size());
for (QueryFragmentInfo fragmentInfo : knownFragments) {
- LOG.info("DBG: Issuing killFragment for failed query {} {}", dagName,
+ LOG.info("DBG: Issuing killFragment for failed query {} {}", queryIdentifier,
fragmentInfo.getFragmentIdentifierString());
executorService.killFragment(fragmentInfo.getFragmentIdentifierString());
}
@@ -341,9 +352,9 @@ public void queryFailed(String queryId, String dagName) {
@Override
public void taskKilled(String amLocation, int port, String user,
- Token<JobTokenIdentifier> jobToken, String queryId, String dagName,
+ Token<JobTokenIdentifier> jobToken, QueryIdentifier queryIdentifier,
TezTaskAttemptID taskAttemptId) {
- amReporter.taskKilled(amLocation, port, user, jobToken, queryId, dagName, taskAttemptId);
+ amReporter.taskKilled(amLocation, port, user, jobToken, queryIdentifier, taskAttemptId);
}
}
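
The daemon now assembles its QueryIdentifier in submitWork from two sources: the dag id recovered from the Tez task attempt id embedded in the fragment spec, and the application id carried on the request. A condensed sketch of that derivation, assuming the fragment identifier string parses as a TezTaskAttemptID (as the surrounding code does):

    TezTaskAttemptID taskAttemptId =
        TezTaskAttemptID.fromString(fragmentSpec.getFragmentIdentifierString());
    int dagIdentifier = taskAttemptId.getTaskID().getVertexID().getDAGId().getId();
    QueryIdentifier queryIdentifier =
        new QueryIdentifier(request.getApplicationIdString(), dagIdentifier);
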
diff --git llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemon.java llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemon.java
index 7ce8ba0..2801b72 100644
--- llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemon.java
+++ llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemon.java
@@ -424,8 +424,8 @@ public void uncaughtException(Thread t, Throwable e) {
private class QueryFailedHandlerProxy implements QueryFailedHandler {
@Override
- public void queryFailed(String queryId, String dagName) {
- containerRunner.queryFailed(queryId, dagName);
+ public void queryFailed(QueryIdentifier queryIdentifier) {
+ containerRunner.queryFailed(queryIdentifier);
}
}
}
diff --git llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/QueryIdentifier.java llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/QueryIdentifier.java
new file mode 100644
index 0000000..dfd859b
--- /dev/null
+++ llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/QueryIdentifier.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.llap.daemon.impl;
+
+/**
+ * A unique identifier for a query, composed of the application identifier and the DAG identifier.
+ */
+public final class QueryIdentifier {
+
+ private final String appIdentifier;
+ private final int dagIdentifier;
+
+
+ public QueryIdentifier(String appIdentifier, int dagIdentifier) {
+ this.appIdentifier = appIdentifier;
+ this.dagIdentifier = dagIdentifier;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) {
+ return true;
+ }
+ if (o == null || !getClass().isAssignableFrom(o.getClass())) {
+ return false;
+ }
+
+ QueryIdentifier that = (QueryIdentifier) o;
+
+ if (dagIdentifier != that.dagIdentifier) {
+ return false;
+ }
+ return appIdentifier.equals(that.appIdentifier);
+
+ }
+
+ @Override
+ public int hashCode() {
+ int result = appIdentifier.hashCode();
+ result = 31 * result + dagIdentifier;
+ return result;
+ }
+
+ @Override
+ public String toString() {
+ return "QueryIdentifier{" +
+ "appIdentifier='" + appIdentifier + '\'' +
+ ", dagIdentifier=" + dagIdentifier +
+ '}';
+ }
+}
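
Because QueryIdentifier keys several concurrent maps in the daemon, its value-based equals/hashCode is what lets a queryComplete lookup find the entry that registerFragment created from a different instance. A quick illustrative check of that contract (assumes java.util.concurrent.ConcurrentHashMap is imported):

    QueryIdentifier a = new QueryIdentifier("application_1452000000000_0001", 3);
    QueryIdentifier b = new QueryIdentifier("application_1452000000000_0001", 3);
    assert a.equals(b) && a.hashCode() == b.hashCode();

    ConcurrentHashMap<QueryIdentifier, String> map = new ConcurrentHashMap<>();
    map.put(a, "queryState");
    assert map.containsKey(b); // distinct instances, same map entry
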
diff --git llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/QueryInfo.java llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/QueryInfo.java
index 27f2d4c..8bec95f 100644
--- llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/QueryInfo.java
+++ llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/QueryInfo.java
@@ -37,7 +37,7 @@
import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateProto;
public class QueryInfo {
- private final String queryId;
+ private final QueryIdentifier queryIdentifier;
private final String appIdString;
private final String dagName;
private final int dagIdentifier;
@@ -54,10 +54,10 @@
private final FinishableStateTracker finishableStateTracker = new FinishableStateTracker();
- public QueryInfo(String queryId, String appIdString, String dagName, int dagIdentifier,
+ public QueryInfo(QueryIdentifier queryIdentifier, String appIdString, String dagName, int dagIdentifier,
String user, ConcurrentMap<String, SourceStateProto> sourceStateMap,
String[] localDirsBase, FileSystem localFs) {
- this.queryId = queryId;
+ this.queryIdentifier = queryIdentifier;
this.appIdString = appIdString;
this.dagName = dagName;
this.dagIdentifier = dagIdentifier;
@@ -67,18 +67,14 @@ public QueryInfo(String queryId, String appIdString, String dagName, int dagIden
this.localFs = localFs;
}
- public String getQueryId() {
- return queryId;
+ public QueryIdentifier getQueryIdentifier() {
+ return queryIdentifier;
}
public String getAppIdString() {
return appIdString;
}
- public String getDagName() {
- return dagName;
- }
-
public int getDagIdentifier() {
return dagIdentifier;
}
diff --git llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/QueryTracker.java llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/QueryTracker.java
index 33d5671..2fd1fdd 100644
--- llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/QueryTracker.java
+++ llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/QueryTracker.java
@@ -48,8 +48,7 @@
private static final Logger LOG = LoggerFactory.getLogger(QueryTracker.class);
private final QueryFileCleaner queryFileCleaner;
- // TODO Make use if the query id for cachin when this is available.
- private final ConcurrentHashMap<String, QueryInfo> queryInfoMap = new ConcurrentHashMap<>();
+ private final ConcurrentHashMap<QueryIdentifier, QueryInfo> queryInfoMap = new ConcurrentHashMap<>();
private final String[] localDirsBase;
private final FileSystem localFs;
@@ -62,20 +61,22 @@
// Alternately - send in an explicit dag start message before any other message is processed.
// Multiple threads communicating from a single AM gets in the way of this.
- // Keeps track of completed dags. Assumes dag names are unique across AMs.
- private final Set<String> completedDagMap = Collections.newSetFromMap(new ConcurrentHashMap<String, Boolean>());
+ // Keeps track of completed DAGs. QueryIdentifiers need to be unique across applications.
+ private final Set<QueryIdentifier> completedDagMap = Collections.newSetFromMap(new ConcurrentHashMap<QueryIdentifier, Boolean>());
private final Lock lock = new ReentrantLock();
- private final ConcurrentMap<String, ReadWriteLock> dagSpecificLocks = new ConcurrentHashMap<>();
+ private final ConcurrentMap<QueryIdentifier, ReadWriteLock> dagSpecificLocks = new ConcurrentHashMap<>();
// Tracks various maps for dagCompletions. This is setup here since stateChange messages
// may be processed by a thread which ends up executing before a task.
- private final ConcurrentMap<String, ConcurrentMap<String, SourceStateProto>> sourceCompletionMap = new ConcurrentHashMap<>();
+ private final ConcurrentMap<QueryIdentifier, ConcurrentMap<String, SourceStateProto>> sourceCompletionMap = new ConcurrentHashMap<>();
- // Tracks queryId by dagName. This can only be set when config is parsed in TezProcessor,
+ // Tracks HiveQueryId by QueryIdentifier. This can only be set when config is parsed in TezProcessor,
// all the other existing code passes queryId equal to 0 everywhere.
- private final ConcurrentHashMap<String, String> dagNameToQueryId = new ConcurrentHashMap<>();
+ // If we switch the runtime and move to parsing the payload in the AM, the actual Hive queryId could
+ // be sent over the wire from the AM and take the place of AppId+dagId in QueryIdentifier.
+ private final ConcurrentHashMap<QueryIdentifier, String> queryIdentifierToHiveQueryId = new ConcurrentHashMap<>();
public QueryTracker(Configuration conf, String[] localDirsBase) {
super("QueryTracker");
@@ -97,7 +98,7 @@ public QueryTracker(Configuration conf, String[] localDirsBase) {
/**
* Register a new fragment for a specific query
- * @param queryId
+ * @param queryIdentifier
* @param appIdString
* @param dagName
* @param dagIdentifier
@@ -107,24 +108,24 @@ public QueryTracker(Configuration conf, String[] localDirsBase) {
* @param user
* @throws IOException
*/
- QueryFragmentInfo registerFragment(String queryId, String appIdString, String dagName, int dagIdentifier,
+ QueryFragmentInfo registerFragment(QueryIdentifier queryIdentifier, String appIdString, String dagName, int dagIdentifier,
String vertexName, int fragmentNumber, int attemptNumber,
String user, FragmentSpecProto fragmentSpec) throws
IOException {
- ReadWriteLock dagLock = getDagLock(dagName);
+ ReadWriteLock dagLock = getDagLock(queryIdentifier);
dagLock.readLock().lock();
try {
- if (!completedDagMap.contains(dagName)) {
- QueryInfo queryInfo = queryInfoMap.get(dagName);
+ if (!completedDagMap.contains(queryIdentifier)) {
+ QueryInfo queryInfo = queryInfoMap.get(queryIdentifier);
if (queryInfo == null) {
- queryInfo = new QueryInfo(queryId, appIdString, dagName, dagIdentifier, user,
- getSourceCompletionMap(dagName), localDirsBase, localFs);
- queryInfoMap.putIfAbsent(dagName, queryInfo);
+ queryInfo = new QueryInfo(queryIdentifier, appIdString, dagName, dagIdentifier, user,
+ getSourceCompletionMap(queryIdentifier), localDirsBase, localFs);
+ queryInfoMap.putIfAbsent(queryIdentifier, queryInfo);
}
return queryInfo.registerFragment(vertexName, fragmentNumber, attemptNumber, fragmentSpec);
} else {
// Cleanup the dag lock here, since it may have been created after the query completed
- dagSpecificLocks.remove(dagName);
+ dagSpecificLocks.remove(queryIdentifier);
throw new RuntimeException(
"Dag " + dagName + " already complete. Rejecting fragment [" + vertexName + ", " + fragmentNumber +
", " + attemptNumber);
@@ -139,12 +140,12 @@ QueryFragmentInfo registerFragment(String queryId, String appIdString, String da
* @param fragmentInfo
*/
void fragmentComplete(QueryFragmentInfo fragmentInfo) {
- String dagName = fragmentInfo.getQueryInfo().getDagName();
- QueryInfo queryInfo = queryInfoMap.get(dagName);
+ QueryIdentifier qId = fragmentInfo.getQueryInfo().getQueryIdentifier();
+ QueryInfo queryInfo = queryInfoMap.get(qId);
if (queryInfo == null) {
// Possible because a queryComplete message from the AM can come in first - KILL / SUCCESSFUL,
// before the fragmentComplete is reported
- LOG.info("Ignoring fragmentComplete message for unknown query");
+ LOG.info("Ignoring fragmentComplete message for unknown query: {}", qId);
} else {
queryInfo.unregisterFragment(fragmentInfo);
}
@@ -152,42 +153,40 @@ void fragmentComplete(QueryFragmentInfo fragmentInfo) {
/**
* Register completion for a query
- * @param queryId
- * @param dagName
+ * @param queryIdentifier
* @param deleteDelay
*/
- List<QueryFragmentInfo> queryComplete(String queryId, String dagName, long deleteDelay) {
+ List<QueryFragmentInfo> queryComplete(QueryIdentifier queryIdentifier, long deleteDelay) {
if (deleteDelay == -1) {
deleteDelay = defaultDeleteDelaySeconds;
}
- ReadWriteLock dagLock = getDagLock(dagName);
+ ReadWriteLock dagLock = getDagLock(queryIdentifier);
dagLock.writeLock().lock();
try {
- completedDagMap.add(dagName);
- LOG.info("Processing queryComplete for dagName={} with deleteDelay={} seconds", dagName,
+ completedDagMap.add(queryIdentifier);
+ LOG.info("Processing queryComplete for queryIdentifier={} with deleteDelay={} seconds", queryIdentifier,
deleteDelay);
- QueryInfo queryInfo = queryInfoMap.remove(dagName);
+ QueryInfo queryInfo = queryInfoMap.remove(queryIdentifier);
if (queryInfo == null) {
- LOG.warn("Ignoring query complete for unknown dag: {}", dagName);
+ LOG.warn("Ignoring query complete for unknown dag: {}", queryIdentifier);
return Collections.emptyList();
}
String[] localDirs = queryInfo.getLocalDirsNoCreate();
if (localDirs != null) {
for (String localDir : localDirs) {
queryFileCleaner.cleanupDir(localDir, deleteDelay);
- ShuffleHandler.get().unregisterDag(localDir, dagName, queryInfo.getDagIdentifier());
+ ShuffleHandler.get().unregisterDag(localDir, queryInfo.getAppIdString(), queryInfo.getDagIdentifier());
}
}
// Clearing this before sending a kill is OK, since canFinish will change to false.
// Ideally this should be a state machine where kills are issued to the executor,
// and the structures are cleaned up once all tasks complete. New requests, however, should not
// be allowed after a query complete is received.
- sourceCompletionMap.remove(dagName);
- String savedQueryId = dagNameToQueryId.remove(dagName);
- queryId = queryId == null ? savedQueryId : queryId;
- dagSpecificLocks.remove(dagName);
- if (queryId != null) {
- ObjectCacheFactory.removeLlapQueryCache(queryId);
+ sourceCompletionMap.remove(queryIdentifier);
+ String savedQueryId = queryIdentifierToHiveQueryId.remove(queryIdentifier);
+ dagSpecificLocks.remove(queryIdentifier);
+ if (savedQueryId != null) {
+ ObjectCacheFactory.removeLlapQueryCache(savedQueryId);
}
return queryInfo.getRegisteredFragments();
} finally {
@@ -197,13 +196,13 @@ void fragmentComplete(QueryFragmentInfo fragmentInfo) {
/**
* Register an update to a source within an executing dag
- * @param dagName
+ * @param queryIdentifier
* @param sourceName
* @param sourceState
*/
- void registerSourceStateChange(String dagName, String sourceName, SourceStateProto sourceState) {
- getSourceCompletionMap(dagName).put(sourceName, sourceState);
- QueryInfo queryInfo = queryInfoMap.get(dagName);
+ void registerSourceStateChange(QueryIdentifier queryIdentifier, String sourceName, SourceStateProto sourceState) {
+ getSourceCompletionMap(queryIdentifier).put(sourceName, sourceState);
+ QueryInfo queryInfo = queryInfoMap.get(queryIdentifier);
if (queryInfo != null) {
queryInfo.sourceStateUpdated(sourceName);
} else {
@@ -213,13 +212,13 @@ void registerSourceStateChange(String dagName, String sourceName, SourceStatePro
}
- private ReadWriteLock getDagLock(String dagName) {
+ private ReadWriteLock getDagLock(QueryIdentifier queryIdentifier) {
lock.lock();
try {
- ReadWriteLock dagLock = dagSpecificLocks.get(dagName);
+ ReadWriteLock dagLock = dagSpecificLocks.get(queryIdentifier);
if (dagLock == null) {
dagLock = new ReentrantReadWriteLock();
- dagSpecificLocks.put(dagName, dagLock);
+ dagSpecificLocks.put(queryIdentifier, dagLock);
}
return dagLock;
} finally {
@@ -227,19 +226,19 @@ private ReadWriteLock getDagLock(String dagName) {
}
}
- private ConcurrentMap<String, SourceStateProto> getSourceCompletionMap(String dagName) {
- ConcurrentMap<String, SourceStateProto> dagMap = sourceCompletionMap.get(dagName);
+ private ConcurrentMap<String, SourceStateProto> getSourceCompletionMap(QueryIdentifier queryIdentifier) {
+ ConcurrentMap<String, SourceStateProto> dagMap = sourceCompletionMap.get(queryIdentifier);
if (dagMap == null) {
dagMap = new ConcurrentHashMap<>();
ConcurrentMap<String, SourceStateProto> old =
- sourceCompletionMap.putIfAbsent(dagName, dagMap);
+ sourceCompletionMap.putIfAbsent(queryIdentifier, dagMap);
dagMap = (old != null) ? old : dagMap;
}
return dagMap;
}
- public void registerDagQueryId(String dagName, String queryId) {
- if (queryId == null) return;
- dagNameToQueryId.putIfAbsent(dagName, queryId);
+ public void registerDagQueryId(QueryIdentifier queryIdentifier, String hiveQueryIdString) {
+ if (hiveQueryIdString == null) return;
+ queryIdentifierToHiveQueryId.putIfAbsent(queryIdentifier, hiveQueryIdString);
}
}
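
The per-query ReadWriteLock handed out by getDagLock encodes the tracker's concurrency contract: any number of fragments may register concurrently under the read lock, while queryComplete takes the write lock so cleanup never interleaves with a registration. Schematically (comments stand in for the method bodies above):

    ReadWriteLock dagLock = getDagLock(queryIdentifier);

    dagLock.readLock().lock();   // registerFragment: many concurrent holders
    try {
      // reject if completedDagMap contains the id, else create/look up QueryInfo
    } finally {
      dagLock.readLock().unlock();
    }

    dagLock.writeLock().lock();  // queryComplete: exclusive
    try {
      // mark complete, remove QueryInfo, schedule local-dir cleanup
    } finally {
      dagLock.writeLock().unlock();
    }
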
diff --git llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskRunnerCallable.java llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskRunnerCallable.java
index f03a2ff..b60f71f 100644
--- llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskRunnerCallable.java
+++ llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskRunnerCallable.java
@@ -131,7 +131,7 @@ public TaskRunnerCallable(SubmitWorkRequestProto request, QueryFragmentInfo frag
// Register with the AMReporter when the callable is setup. Unregister once it starts running.
if (jobToken != null) {
this.amReporter.registerTask(request.getAmHost(), request.getAmPort(),
- request.getUser(), jobToken, null, request.getFragmentSpec().getDagName());
+ request.getUser(), jobToken, fragmentInfo.getQueryInfo().getQueryIdentifier());
}
this.metrics = metrics;
this.requestId = request.getFragmentSpec().getFragmentIdentifierString();
@@ -297,9 +297,8 @@ public void killTask() {
*/
public void reportTaskKilled() {
killedTaskHandler
- .taskKilled(request.getAmHost(), request.getAmPort(), request.getUser(), jobToken, null,
- taskSpec.getDAGName(),
- taskSpec.getTaskAttemptID());
+ .taskKilled(request.getAmHost(), request.getAmPort(), request.getUser(), jobToken,
+ fragmentInfo.getQueryInfo().getQueryIdentifier(), taskSpec.getTaskAttemptID());
}
public boolean canFinish() {
@@ -428,6 +427,7 @@ public void onSuccess(TaskRunner2Result result) {
HistoryLogger
.logFragmentEnd(request.getApplicationIdString(), request.getContainerIdString(),
executionContext.getHostName(), request.getFragmentSpec().getDagName(),
+ fragmentInfo.getQueryInfo().getDagIdentifier(),
request.getFragmentSpec().getVertexName(),
request.getFragmentSpec().getFragmentNumber(),
request.getFragmentSpec().getAttemptNumber(), taskRunnerCallable.threadName,
@@ -445,6 +445,7 @@ public void onFailure(Throwable t) {
HistoryLogger
.logFragmentEnd(request.getApplicationIdString(), request.getContainerIdString(),
executionContext.getHostName(), request.getFragmentSpec().getDagName(),
+ fragmentInfo.getQueryInfo().getDagIdentifier(),
request.getFragmentSpec().getVertexName(),
request.getFragmentSpec().getFragmentNumber(),
request.getFragmentSpec().getAttemptNumber(), taskRunnerCallable.threadName,
diff --git llap-server/src/java/org/apache/hadoop/hive/llap/tezplugins/Converters.java llap-server/src/java/org/apache/hadoop/hive/llap/tezplugins/Converters.java
index 7428a6a..f61d62f 100644
--- llap-server/src/java/org/apache/hadoop/hive/llap/tezplugins/Converters.java
+++ llap-server/src/java/org/apache/hadoop/hive/llap/tezplugins/Converters.java
@@ -262,5 +262,4 @@ public static SourceStateProto fromVertexState(VertexState state) {
throw new RuntimeException("Unexpected state: " + state);
}
}
-
}
diff --git llap-server/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapTaskCommunicator.java llap-server/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapTaskCommunicator.java
index ce248e9..e4c5c26 100644
--- llap-server/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapTaskCommunicator.java
+++ llap-server/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapTaskCommunicator.java
@@ -35,10 +35,10 @@
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
import org.apache.hadoop.hive.llap.LlapNodeId;
-import org.apache.hadoop.hive.llap.configuration.LlapConfiguration;
import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos;
import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentRuntimeInfo;
import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteRequestProto;
+import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto;
import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedRequestProto;
import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedResponseProto;
import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto;
@@ -82,7 +82,8 @@
private static final Logger LOG = LoggerFactory.getLogger(LlapTaskCommunicator.class);
private final SubmitWorkRequestProto BASE_SUBMIT_WORK_REQUEST;
- private final ConcurrentMap<String, ByteBuffer> credentialMap;
+
+ private final ConcurrentMap<QueryIdentifierProto, ByteBuffer> credentialMap;
// Tracks containerIds and taskAttemptIds, so can be kept independent of the running DAG.
// When DAG specific cleanup happens, it'll be better to link this to a DAG though.
@@ -95,7 +96,8 @@
private final LlapTaskUmbilicalProtocol umbilical;
private final Token<JobTokenIdentifier> token;
- private volatile String currentDagName;
+ private volatile int currentDagId;
+ private volatile QueryIdentifierProto currentQueryIdentifierProto;
public LlapTaskCommunicator(
TaskCommunicatorContext taskCommunicatorContext) {
@@ -217,8 +219,9 @@ public void registerRunningTaskAttempt(final ContainerId containerId, final Task
int priority) {
super.registerRunningTaskAttempt(containerId, taskSpec, additionalResources, credentials,
credentialsChanged, priority);
- if (taskSpec.getDAGName() != currentDagName) {
- resetCurrentDag(taskSpec.getDAGName());
+ int dagId = taskSpec.getTaskAttemptID().getTaskID().getVertexID().getDAGId().getId();
+ if (currentQueryIdentifierProto == null || (dagId != currentQueryIdentifierProto.getDagIdentifier())) {
+ resetCurrentDag(dagId);
}
@@ -241,7 +244,7 @@ public void registerRunningTaskAttempt(final ContainerId containerId, final Task
nodesForQuery.add(nodeId);
sourceStateTracker.registerTaskForStateUpdates(host, port, taskSpec.getInputs());
- FragmentRuntimeInfo fragmentRuntimeInfo = sourceStateTracker.getFragmentRuntimeInfo(taskSpec.getDAGName(),
+ FragmentRuntimeInfo fragmentRuntimeInfo = sourceStateTracker.getFragmentRuntimeInfo(
taskSpec.getVertexName(), taskSpec.getTaskAttemptID().getTaskID().getId(), priority);
SubmitWorkRequestProto requestProto;
@@ -334,7 +337,7 @@ private void sendTaskTerminated(final TezTaskAttemptID taskAttemptId,
// NodeId can be null if the task gets unregistered due to failure / being killed by the daemon itself
if (nodeId != null) {
TerminateFragmentRequestProto request =
- TerminateFragmentRequestProto.newBuilder().setDagName(currentDagName)
+ TerminateFragmentRequestProto.newBuilder().setQueryIdentifier(currentQueryIdentifierProto)
.setFragmentIdentifierString(taskAttemptId.toString()).build();
communicator.sendTerminateFragment(request, nodeId.getHostname(), nodeId.getPort(),
new TaskCommunicator.ExecuteRequestCallback<LlapDaemonProtocolProtos.TerminateFragmentResponseProto>() {
@@ -355,12 +358,16 @@ public void indicateError(Throwable t) {
}
}
+
+
+
@Override
- public void dagComplete(final String dagName) {
- QueryCompleteRequestProto request = QueryCompleteRequestProto.newBuilder().setDagName(
- dagName).setDeleteDelay(deleteDelayOnDagComplete).build();
+ public void dagComplete(final int dagIdentifier) {
+ QueryCompleteRequestProto request = QueryCompleteRequestProto.newBuilder()
+ .setQueryIdentifier(constructQueryIdentifierProto(dagIdentifier))
+ .setDeleteDelay(deleteDelayOnDagComplete).build();
for (final LlapNodeId llapNodeId : nodesForQuery) {
- LOG.info("Sending dagComplete message for {}, to {}", dagName, llapNodeId);
+ LOG.info("Sending dagComplete message for {}, to {}", dagIdentifier, llapNodeId);
communicator.sendQueryComplete(request, llapNodeId.getHostname(), llapNodeId.getPort(),
new TaskCommunicator.ExecuteRequestCallback<LlapDaemonProtocolProtos.QueryCompleteResponseProto>() {
@Override
@@ -369,7 +376,7 @@ public void setResponse(LlapDaemonProtocolProtos.QueryCompleteResponseProto resp
@Override
public void indicateError(Throwable t) {
- LOG.warn("Failed to indicate dag complete dagId={} to node {}", dagName, llapNodeId);
+ LOG.warn("Failed to indicate dag complete dagId={} to node {}", dagIdentifier, llapNodeId);
}
});
}
@@ -407,12 +414,12 @@ public void indicateError(Throwable t) {
- private void resetCurrentDag(String newDagName) {
+ private void resetCurrentDag(int newDagId) {
// Working on the assumption that a single DAG runs at a time per AM.
- currentDagName = newDagName;
- sourceStateTracker.resetState(newDagName);
+ currentQueryIdentifierProto = constructQueryIdentifierProto(newDagId);
+ sourceStateTracker.resetState(newDagId);
nodesForQuery.clear();
- LOG.info("CurrentDag set to: " + newDagName);
+ LOG.info("CurrentDagId set to: " + newDagId + ", name=" + getContext().getCurrentDagName());
// TODO Is it possible for heartbeats to come in from lost tasks - those should be told to die, which
// is likely already happening.
}
@@ -430,10 +437,12 @@ private SubmitWorkRequestProto constructSubmitWorkRequest(ContainerId containerI
// Credentials can change across DAGs. Ideally construct only once per DAG.
taskCredentials.addAll(getContext().getCredentials());
- ByteBuffer credentialsBinary = credentialMap.get(taskSpec.getDAGName());
+ Preconditions.checkState(currentQueryIdentifierProto.getDagIdentifier() ==
+ taskSpec.getTaskAttemptID().getTaskID().getVertexID().getDAGId().getId());
+ ByteBuffer credentialsBinary = credentialMap.get(currentQueryIdentifierProto);
if (credentialsBinary == null) {
credentialsBinary = serializeCredentials(getContext().getCredentials());
- credentialMap.putIfAbsent(taskSpec.getDAGName(), credentialsBinary.duplicate());
+ credentialMap.putIfAbsent(currentQueryIdentifierProto, credentialsBinary.duplicate());
} else {
credentialsBinary = credentialsBinary.duplicate();
}
@@ -634,4 +643,10 @@ void nodePinged(String hostname, int port) {
}
}
}
+
+ private QueryIdentifierProto constructQueryIdentifierProto(int dagIdentifier) {
+ return QueryIdentifierProto.newBuilder()
+ .setAppIdentifier(getContext().getCurrentAppIdentifier()).setDagIdentifier(dagIdentifier)
+ .build();
+ }
}
\ No newline at end of file
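
Keying credentialMap by QueryIdentifierProto rather than by dag name works because generated protobuf messages implement value-based equals and hashCode: two protos built from the same app identifier and dag identifier find the same cached credentials. An illustrative check, with hypothetical values:

    QueryIdentifierProto k1 = QueryIdentifierProto.newBuilder()
        .setAppIdentifier("application_1452000000000_0001").setDagIdentifier(3).build();
    QueryIdentifierProto k2 = QueryIdentifierProto.newBuilder()
        .setAppIdentifier("application_1452000000000_0001").setDagIdentifier(3).build();
    assert k1.equals(k2) && k1.hashCode() == k2.hashCode();
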
diff --git llap-server/src/java/org/apache/hadoop/hive/llap/tezplugins/helpers/SourceStateTracker.java llap-server/src/java/org/apache/hadoop/hive/llap/tezplugins/helpers/SourceStateTracker.java
index 066fae5..628fe9c 100644
--- llap-server/src/java/org/apache/hadoop/hive/llap/tezplugins/helpers/SourceStateTracker.java
+++ llap-server/src/java/org/apache/hadoop/hive/llap/tezplugins/helpers/SourceStateTracker.java
@@ -24,6 +24,8 @@
import java.util.Set;
import org.apache.commons.lang3.mutable.MutableInt;
+import org.apache.hadoop.hive.llap.daemon.impl.QueryIdentifier;
+import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.hive.llap.LlapNodeId;
@@ -45,28 +47,33 @@
private final TaskCommunicatorContext taskCommunicatorContext;
private final LlapTaskCommunicator taskCommunicator;
+ private final QueryIdentifierProto BASE_QUERY_IDENTIFIER;
+
// Tracks vertices for which notifications have been registered
private final Set<String> notificationRegisteredVertices = new HashSet<>();
private final Map<String, SourceInfo> sourceInfoMap = new HashMap<>();
private final Map<LlapNodeId, NodeInfo> nodeInfoMap = new HashMap<>();
- private volatile String currentDagName;
+ private volatile QueryIdentifierProto currentQueryIdentifier;
public SourceStateTracker(TaskCommunicatorContext taskCommunicatorContext,
LlapTaskCommunicator taskCommunicator) {
this.taskCommunicatorContext = taskCommunicatorContext;
this.taskCommunicator = taskCommunicator;
+ BASE_QUERY_IDENTIFIER = QueryIdentifierProto.newBuilder()
+ .setAppIdentifier(taskCommunicatorContext.getCurrentAppIdentifier()).build();
}
/**
* To be invoked after each DAG completes.
*/
- public synchronized void resetState(String newDagName) {
+ public synchronized void resetState(int newDagId) {
sourceInfoMap.clear();
nodeInfoMap.clear();
notificationRegisteredVertices.clear();
- this.currentDagName = newDagName;
+ this.currentQueryIdentifier =
+ QueryIdentifierProto.newBuilder(BASE_QUERY_IDENTIFIER).setDagIdentifier(newDagId).build();
}
/**
@@ -139,16 +146,16 @@ public synchronized void sourceStateUpdated(String sourceName, VertexState sourc
}
+ // Assumes serialized DAGs within an AM, and a reset of structures after each DAG completes.
/**
* Constructs FragmentRuntimeInfo for scheduling within LLAP daemons.
* Also caches state based on state updates.
- * @param dagName
* @param vertexName
* @param fragmentNumber
* @param priority
* @return
*/
- public synchronized FragmentRuntimeInfo getFragmentRuntimeInfo(String dagName, String vertexName, int fragmentNumber,
+ public synchronized FragmentRuntimeInfo getFragmentRuntimeInfo(String vertexName, int fragmentNumber,
int priority) {
FragmentRuntimeInfo.Builder builder = FragmentRuntimeInfo.newBuilder();
maybeRegisterForVertexUpdates(vertexName);
@@ -282,9 +289,8 @@ private boolean isSourceOfInterest(InputSpec inputSpec) {
void sendStateUpdateToNode(LlapNodeId nodeId, String sourceName, VertexState state) {
taskCommunicator.sendStateUpdate(nodeId.getHostname(), nodeId.getPort(),
- SourceStateUpdatedRequestProto.newBuilder().setDagName(currentDagName).setSrcName(
- sourceName)
- .setState(Converters.fromVertexState(state)).build());
+ SourceStateUpdatedRequestProto.newBuilder().setQueryIdentifier(currentQueryIdentifier)
+ .setSrcName(sourceName).setState(Converters.fromVertexState(state)).build());
}
diff --git llap-server/src/protobuf/LlapDaemonProtocol.proto llap-server/src/protobuf/LlapDaemonProtocol.proto
index 07721df..4fde4d3 100644
--- llap-server/src/protobuf/LlapDaemonProtocol.proto
+++ llap-server/src/protobuf/LlapDaemonProtocol.proto
@@ -50,6 +50,7 @@ message GroupInputSpecProto {
message FragmentSpecProto {
optional string fragment_identifier_string = 1;
optional string dag_name = 2;
+ optional int32 dag_id = 11;
optional string vertex_name = 3;
optional EntityDescriptorProto processor_descriptor = 4;
repeated IOSpecProto input_specs = 5;
@@ -74,6 +75,11 @@ enum SourceStateProto {
S_RUNNING = 2;
}
+message QueryIdentifierProto {
+ optional string app_identifier = 1;
+ optional int32 dag_identifier = 2;
+}
+
message SubmitWorkRequestProto {
optional string container_id_string = 1;
optional string am_host = 2;
@@ -91,7 +97,7 @@ message SubmitWorkResponseProto {
}
message SourceStateUpdatedRequestProto {
- optional string dag_name = 1;
+ optional QueryIdentifierProto query_identifier = 1;
optional string src_name = 2;
optional SourceStateProto state = 3;
}
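
Under the revised schema, each control message embeds a single QueryIdentifierProto in place of its old per-message dag_name string. A sketch of building the new SourceStateUpdatedRequestProto (source name and identifier values are illustrative):

    SourceStateUpdatedRequestProto request = SourceStateUpdatedRequestProto.newBuilder()
        .setQueryIdentifier(QueryIdentifierProto.newBuilder()
            .setAppIdentifier("application_1452000000000_0001")
            .setDagIdentifier(3))
        .setSrcName("Map 1")
        .setState(SourceStateProto.S_RUNNING)
        .build();
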
@@ -101,17 +107,16 @@ message SourceStateUpdatedResponseProto {
message QueryCompleteRequestProto {
optional string query_id = 1;
- optional string dag_name = 2;
- optional int64 delete_delay = 3 [default = 0];
+ optional QueryIdentifierProto query_identifier = 2;
+ optional int64 delete_delay = 4 [default = 0];
}
message QueryCompleteResponseProto {
}
message TerminateFragmentRequestProto {
- optional string query_id = 1;
- optional string dag_name = 2;
- optional string fragment_identifier_string = 7;
+ optional QueryIdentifierProto query_identifier = 1;
+ optional string fragment_identifier_string = 2;
}
message TerminateFragmentResponseProto {