diff --git llap-client/src/java/org/apache/hadoop/hive/llap/tez/LlapProtocolClientProxy.java llap-client/src/java/org/apache/hadoop/hive/llap/tez/LlapProtocolClientProxy.java
index 211696a0b5..b2ab9ba635 100644
--- llap-client/src/java/org/apache/hadoop/hive/llap/tez/LlapProtocolClientProxy.java
+++ llap-client/src/java/org/apache/hadoop/hive/llap/tez/LlapProtocolClientProxy.java
@@ -24,8 +24,11 @@
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.llap.AsyncPbRpcProxy;
 import org.apache.hadoop.hive.llap.LlapNodeId;
+import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos;
 import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteRequestProto;
 import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteResponseProto;
+import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagRequestProto;
+import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagResponseProto;
 import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedRequestProto;
 import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedResponseProto;
 import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto;
@@ -55,6 +58,12 @@ public LlapProtocolClientProxy(
         TimeUnit.MILLISECONDS), -1, 1);
   }
 
+  public void registerDag(RegisterDagRequestProto request, String host, int port,
+      final ExecuteRequestCallback<RegisterDagResponseProto> callback) {
+    LlapNodeId nodeId = LlapNodeId.getInstance(host, port);
+    queueRequest(new RegisterDagCallable(nodeId, request, callback));
+  }
+
   public void sendSubmitWork(SubmitWorkRequestProto request, String host, int port,
       final ExecuteRequestCallback<SubmitWorkResponseProto> callback) {
     LlapNodeId nodeId = LlapNodeId.getInstance(host, port);
@@ -86,6 +95,20 @@ public void sendUpdateFragment(final UpdateFragmentRequestProto request, final S
     queueRequest(new SendUpdateFragmentCallable(nodeId, request, callback));
   }
 
+  private class RegisterDagCallable extends
+      NodeCallableRequest<RegisterDagRequestProto, RegisterDagResponseProto> {
+    protected RegisterDagCallable(LlapNodeId nodeId,
+        RegisterDagRequestProto registerDagRequestProto,
+        ExecuteRequestCallback<RegisterDagResponseProto> callback) {
+      super(nodeId, registerDagRequestProto, callback);
+    }
+
+    @Override public
+    RegisterDagResponseProto call() throws Exception {
+      return getProxy(nodeId, null).registerDag(null, request);
+    }
+  }
+
   private class SubmitWorkCallable extends
       NodeCallableRequest<SubmitWorkRequestProto, SubmitWorkResponseProto> {
     protected SubmitWorkCallable(LlapNodeId nodeId,
diff --git llap-common/src/gen/protobuf/gen-java/org/apache/hadoop/hive/llap/daemon/rpc/LlapDaemonProtocolProtos.java llap-common/src/gen/protobuf/gen-java/org/apache/hadoop/hive/llap/daemon/rpc/LlapDaemonProtocolProtos.java
index 8fecc1e920..76591404fd 100644
--- llap-common/src/gen/protobuf/gen-java/org/apache/hadoop/hive/llap/daemon/rpc/LlapDaemonProtocolProtos.java
+++ llap-common/src/gen/protobuf/gen-java/org/apache/hadoop/hive/llap/daemon/rpc/LlapDaemonProtocolProtos.java
@@ -11465,6 +11465,1155 @@ public Builder clearIsGuaranteed() {
     // @@protoc_insertion_point(class_scope:SubmitWorkRequestProto)
   }
 
+  public interface RegisterDagRequestProtoOrBuilder
+      extends com.google.protobuf.MessageOrBuilder {
+
+    // optional string user = 1;
+    /**
+     * optional string user = 1;
+     */
+    boolean hasUser();
+    /**
+     * optional string user = 1;
+     */
+    java.lang.String getUser();
+    /**
+     * optional string user = 1;
+     */
+    com.google.protobuf.ByteString
+        getUserBytes();
+
+    // required .QueryIdentifierProto
query_identifier = 2; + /** + * required .QueryIdentifierProto query_identifier = 2; + */ + boolean hasQueryIdentifier(); + /** + * required .QueryIdentifierProto query_identifier = 2; + */ + org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto getQueryIdentifier(); + /** + * required .QueryIdentifierProto query_identifier = 2; + */ + org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProtoOrBuilder getQueryIdentifierOrBuilder(); + + // optional bytes credentials_binary = 3; + /** + * optional bytes credentials_binary = 3; + */ + boolean hasCredentialsBinary(); + /** + * optional bytes credentials_binary = 3; + */ + com.google.protobuf.ByteString getCredentialsBinary(); + } + /** + * Protobuf type {@code RegisterDagRequestProto} + */ + public static final class RegisterDagRequestProto extends + com.google.protobuf.GeneratedMessage + implements RegisterDagRequestProtoOrBuilder { + // Use RegisterDagRequestProto.newBuilder() to construct. + private RegisterDagRequestProto(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private RegisterDagRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final RegisterDagRequestProto defaultInstance; + public static RegisterDagRequestProto getDefaultInstance() { + return defaultInstance; + } + + public RegisterDagRequestProto getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private RegisterDagRequestProto( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + user_ = input.readBytes(); + break; + } + case 18: { + org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.Builder subBuilder = null; + if (((bitField0_ & 0x00000002) == 0x00000002)) { + subBuilder = queryIdentifier_.toBuilder(); + } + queryIdentifier_ = input.readMessage(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(queryIdentifier_); + queryIdentifier_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000002; + break; + } + case 26: { + bitField0_ |= 0x00000004; + credentialsBinary_ = input.readBytes(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_RegisterDagRequestProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_RegisterDagRequestProto_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagRequestProto.class, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagRequestProto.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public RegisterDagRequestProto parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new RegisterDagRequestProto(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // optional string user = 1; + public static final int USER_FIELD_NUMBER = 1; + private java.lang.Object user_; + /** + * optional string user = 1; + */ + public boolean hasUser() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional string user = 1; + */ + public java.lang.String getUser() { + java.lang.Object ref = user_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + user_ = s; + } + return s; + } + } + /** + * optional string user = 1; + */ + public com.google.protobuf.ByteString + getUserBytes() { + java.lang.Object ref = user_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + user_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // required .QueryIdentifierProto query_identifier = 2; + public static final int QUERY_IDENTIFIER_FIELD_NUMBER = 2; + private org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto queryIdentifier_; + /** + * required .QueryIdentifierProto query_identifier = 2; + */ + public boolean hasQueryIdentifier() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required .QueryIdentifierProto query_identifier = 2; + */ + public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto getQueryIdentifier() { + return queryIdentifier_; + } + /** + * required .QueryIdentifierProto query_identifier = 2; + */ + public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProtoOrBuilder getQueryIdentifierOrBuilder() { + return queryIdentifier_; + } + + // optional bytes credentials_binary = 3; + public static final int CREDENTIALS_BINARY_FIELD_NUMBER = 3; + private com.google.protobuf.ByteString credentialsBinary_; + /** + * optional bytes credentials_binary = 3; + */ + public boolean hasCredentialsBinary() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional bytes credentials_binary = 3; + */ + public com.google.protobuf.ByteString getCredentialsBinary() { + return credentialsBinary_; + } + + private void initFields() { + user_ = ""; + queryIdentifier_ = 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.getDefaultInstance(); + credentialsBinary_ = com.google.protobuf.ByteString.EMPTY; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasQueryIdentifier()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getUserBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeMessage(2, queryIdentifier_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeBytes(3, credentialsBinary_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getUserBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, queryIdentifier_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(3, credentialsBinary_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagRequestProto)) { + return super.equals(obj); + } + org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagRequestProto other = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagRequestProto) obj; + + boolean result = true; + result = result && (hasUser() == other.hasUser()); + if (hasUser()) { + result = result && getUser() + .equals(other.getUser()); + } + result = result && (hasQueryIdentifier() == other.hasQueryIdentifier()); + if (hasQueryIdentifier()) { + result = result && getQueryIdentifier() + .equals(other.getQueryIdentifier()); + } + result = result && (hasCredentialsBinary() == other.hasCredentialsBinary()); + if (hasCredentialsBinary()) { + result = result && getCredentialsBinary() + .equals(other.getCredentialsBinary()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasUser()) { + hash = (37 * hash) + USER_FIELD_NUMBER; + hash = (53 * hash) + getUser().hashCode(); + } + if (hasQueryIdentifier()) { + hash = (37 * hash) + QUERY_IDENTIFIER_FIELD_NUMBER; + hash = (53 * hash) + getQueryIdentifier().hashCode(); + } + if (hasCredentialsBinary()) { + hash = (37 * hash) + CREDENTIALS_BINARY_FIELD_NUMBER; + hash = (53 * hash) + 
getCredentialsBinary().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagRequestProto parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagRequestProto parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagRequestProto parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagRequestProto parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagRequestProto parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagRequestProto parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagRequestProto parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagRequestProto parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagRequestProto parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagRequestProto parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagRequestProto prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code RegisterDagRequestProto} + */ + public static final class Builder extends + 
com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagRequestProtoOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_RegisterDagRequestProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_RegisterDagRequestProto_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagRequestProto.class, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagRequestProto.Builder.class); + } + + // Construct using org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagRequestProto.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getQueryIdentifierFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + user_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + if (queryIdentifierBuilder_ == null) { + queryIdentifier_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.getDefaultInstance(); + } else { + queryIdentifierBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + credentialsBinary_ = com.google.protobuf.ByteString.EMPTY; + bitField0_ = (bitField0_ & ~0x00000004); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_RegisterDagRequestProto_descriptor; + } + + public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagRequestProto getDefaultInstanceForType() { + return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagRequestProto.getDefaultInstance(); + } + + public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagRequestProto build() { + org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagRequestProto result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagRequestProto buildPartial() { + org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagRequestProto result = new org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagRequestProto(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.user_ = user_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + if (queryIdentifierBuilder_ == null) { + result.queryIdentifier_ = queryIdentifier_; + } else { + result.queryIdentifier_ = queryIdentifierBuilder_.build(); + } + if (((from_bitField0_ & 0x00000004) == 
0x00000004)) { + to_bitField0_ |= 0x00000004; + } + result.credentialsBinary_ = credentialsBinary_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagRequestProto) { + return mergeFrom((org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagRequestProto)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagRequestProto other) { + if (other == org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagRequestProto.getDefaultInstance()) return this; + if (other.hasUser()) { + bitField0_ |= 0x00000001; + user_ = other.user_; + onChanged(); + } + if (other.hasQueryIdentifier()) { + mergeQueryIdentifier(other.getQueryIdentifier()); + } + if (other.hasCredentialsBinary()) { + setCredentialsBinary(other.getCredentialsBinary()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasQueryIdentifier()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagRequestProto parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagRequestProto) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // optional string user = 1; + private java.lang.Object user_ = ""; + /** + * optional string user = 1; + */ + public boolean hasUser() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional string user = 1; + */ + public java.lang.String getUser() { + java.lang.Object ref = user_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + user_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * optional string user = 1; + */ + public com.google.protobuf.ByteString + getUserBytes() { + java.lang.Object ref = user_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + user_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * optional string user = 1; + */ + public Builder setUser( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + user_ = value; + onChanged(); + return this; + } + /** + * optional string user = 1; + */ + public Builder clearUser() { + bitField0_ = (bitField0_ & ~0x00000001); + user_ = getDefaultInstance().getUser(); + onChanged(); + return this; + } + /** + * optional string user = 1; + */ + public Builder setUserBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + user_ = value; + onChanged(); + return this; + } + + // required 
.QueryIdentifierProto query_identifier = 2; + private org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto queryIdentifier_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProtoOrBuilder> queryIdentifierBuilder_; + /** + * required .QueryIdentifierProto query_identifier = 2; + */ + public boolean hasQueryIdentifier() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required .QueryIdentifierProto query_identifier = 2; + */ + public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto getQueryIdentifier() { + if (queryIdentifierBuilder_ == null) { + return queryIdentifier_; + } else { + return queryIdentifierBuilder_.getMessage(); + } + } + /** + * required .QueryIdentifierProto query_identifier = 2; + */ + public Builder setQueryIdentifier(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto value) { + if (queryIdentifierBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + queryIdentifier_ = value; + onChanged(); + } else { + queryIdentifierBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * required .QueryIdentifierProto query_identifier = 2; + */ + public Builder setQueryIdentifier( + org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.Builder builderForValue) { + if (queryIdentifierBuilder_ == null) { + queryIdentifier_ = builderForValue.build(); + onChanged(); + } else { + queryIdentifierBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * required .QueryIdentifierProto query_identifier = 2; + */ + public Builder mergeQueryIdentifier(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto value) { + if (queryIdentifierBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002) && + queryIdentifier_ != org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.getDefaultInstance()) { + queryIdentifier_ = + org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.newBuilder(queryIdentifier_).mergeFrom(value).buildPartial(); + } else { + queryIdentifier_ = value; + } + onChanged(); + } else { + queryIdentifierBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * required .QueryIdentifierProto query_identifier = 2; + */ + public Builder clearQueryIdentifier() { + if (queryIdentifierBuilder_ == null) { + queryIdentifier_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.getDefaultInstance(); + onChanged(); + } else { + queryIdentifierBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + /** + * required .QueryIdentifierProto query_identifier = 2; + */ + public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.Builder getQueryIdentifierBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return getQueryIdentifierFieldBuilder().getBuilder(); + } + /** + * required .QueryIdentifierProto query_identifier = 2; + */ + public 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProtoOrBuilder getQueryIdentifierOrBuilder() { + if (queryIdentifierBuilder_ != null) { + return queryIdentifierBuilder_.getMessageOrBuilder(); + } else { + return queryIdentifier_; + } + } + /** + * required .QueryIdentifierProto query_identifier = 2; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProtoOrBuilder> + getQueryIdentifierFieldBuilder() { + if (queryIdentifierBuilder_ == null) { + queryIdentifierBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProtoOrBuilder>( + queryIdentifier_, + getParentForChildren(), + isClean()); + queryIdentifier_ = null; + } + return queryIdentifierBuilder_; + } + + // optional bytes credentials_binary = 3; + private com.google.protobuf.ByteString credentialsBinary_ = com.google.protobuf.ByteString.EMPTY; + /** + * optional bytes credentials_binary = 3; + */ + public boolean hasCredentialsBinary() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional bytes credentials_binary = 3; + */ + public com.google.protobuf.ByteString getCredentialsBinary() { + return credentialsBinary_; + } + /** + * optional bytes credentials_binary = 3; + */ + public Builder setCredentialsBinary(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000004; + credentialsBinary_ = value; + onChanged(); + return this; + } + /** + * optional bytes credentials_binary = 3; + */ + public Builder clearCredentialsBinary() { + bitField0_ = (bitField0_ & ~0x00000004); + credentialsBinary_ = getDefaultInstance().getCredentialsBinary(); + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:RegisterDagRequestProto) + } + + static { + defaultInstance = new RegisterDagRequestProto(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:RegisterDagRequestProto) + } + + public interface RegisterDagResponseProtoOrBuilder + extends com.google.protobuf.MessageOrBuilder { + } + /** + * Protobuf type {@code RegisterDagResponseProto} + */ + public static final class RegisterDagResponseProto extends + com.google.protobuf.GeneratedMessage + implements RegisterDagResponseProtoOrBuilder { + // Use RegisterDagResponseProto.newBuilder() to construct. 
+ private RegisterDagResponseProto(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private RegisterDagResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final RegisterDagResponseProto defaultInstance; + public static RegisterDagResponseProto getDefaultInstance() { + return defaultInstance; + } + + public RegisterDagResponseProto getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private RegisterDagResponseProto( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_RegisterDagResponseProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_RegisterDagResponseProto_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagResponseProto.class, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagResponseProto.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public RegisterDagResponseProto parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new RegisterDagResponseProto(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private void initFields() { + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + size += getUnknownFields().getSerializedSize(); + 
memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagResponseProto)) { + return super.equals(obj); + } + org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagResponseProto other = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagResponseProto) obj; + + boolean result = true; + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagResponseProto parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagResponseProto parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagResponseProto parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagResponseProto parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagResponseProto parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagResponseProto parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagResponseProto parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagResponseProto parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagResponseProto parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return 
PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagResponseProto parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagResponseProto prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code RegisterDagResponseProto} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagResponseProtoOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_RegisterDagResponseProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_RegisterDagResponseProto_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagResponseProto.class, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagResponseProto.Builder.class); + } + + // Construct using org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagResponseProto.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_RegisterDagResponseProto_descriptor; + } + + public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagResponseProto getDefaultInstanceForType() { + return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagResponseProto.getDefaultInstance(); + } + + public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagResponseProto build() { + org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagResponseProto result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagResponseProto buildPartial() { + org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagResponseProto result = new 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagResponseProto(this); + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagResponseProto) { + return mergeFrom((org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagResponseProto)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagResponseProto other) { + if (other == org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagResponseProto.getDefaultInstance()) return this; + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagResponseProto parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagResponseProto) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + // @@protoc_insertion_point(builder_scope:RegisterDagResponseProto) + } + + static { + defaultInstance = new RegisterDagResponseProto(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:RegisterDagResponseProto) + } + public interface SubmitWorkResponseProtoOrBuilder extends com.google.protobuf.MessageOrBuilder { @@ -18909,6 +20058,14 @@ public Builder clearPurgedMemoryBytes() { protected LlapDaemonProtocol() {} public interface Interface { + /** + * rpc registerDag(.RegisterDagRequestProto) returns (.RegisterDagResponseProto); + */ + public abstract void registerDag( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagRequestProto request, + com.google.protobuf.RpcCallback done); + /** * rpc submitWork(.SubmitWorkRequestProto) returns (.SubmitWorkResponseProto); */ @@ -18954,6 +20111,14 @@ public abstract void updateFragment( public static com.google.protobuf.Service newReflectiveService( final Interface impl) { return new LlapDaemonProtocol() { + @java.lang.Override + public void registerDag( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagRequestProto request, + com.google.protobuf.RpcCallback done) { + impl.registerDag(controller, request, done); + } + @java.lang.Override public void submitWork( com.google.protobuf.RpcController controller, @@ -19017,14 +20182,16 @@ public void updateFragment( } switch(method.getIndex()) { case 0: - return impl.submitWork(controller, (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto)request); + return impl.registerDag(controller, (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagRequestProto)request); case 1: - return impl.sourceStateUpdated(controller, (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedRequestProto)request); + return 
impl.submitWork(controller, (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto)request); case 2: - return impl.queryComplete(controller, (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteRequestProto)request); + return impl.sourceStateUpdated(controller, (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedRequestProto)request); case 3: - return impl.terminateFragment(controller, (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto)request); + return impl.queryComplete(controller, (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteRequestProto)request); case 4: + return impl.terminateFragment(controller, (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto)request); + case 5: return impl.updateFragment(controller, (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UpdateFragmentRequestProto)request); default: throw new java.lang.AssertionError("Can't get here."); @@ -19041,14 +20208,16 @@ public void updateFragment( } switch(method.getIndex()) { case 0: - return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto.getDefaultInstance(); + return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagRequestProto.getDefaultInstance(); case 1: - return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedRequestProto.getDefaultInstance(); + return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto.getDefaultInstance(); case 2: - return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteRequestProto.getDefaultInstance(); + return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedRequestProto.getDefaultInstance(); case 3: - return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto.getDefaultInstance(); + return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteRequestProto.getDefaultInstance(); case 4: + return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto.getDefaultInstance(); + case 5: return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UpdateFragmentRequestProto.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); @@ -19065,14 +20234,16 @@ public void updateFragment( } switch(method.getIndex()) { case 0: - return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkResponseProto.getDefaultInstance(); + return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagResponseProto.getDefaultInstance(); case 1: - return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedResponseProto.getDefaultInstance(); + return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkResponseProto.getDefaultInstance(); case 2: - return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteResponseProto.getDefaultInstance(); + return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedResponseProto.getDefaultInstance(); case 3: - return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentResponseProto.getDefaultInstance(); + return 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteResponseProto.getDefaultInstance(); case 4: + return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentResponseProto.getDefaultInstance(); + case 5: return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UpdateFragmentResponseProto.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); @@ -19082,6 +20253,14 @@ public void updateFragment( }; } + /** + * rpc registerDag(.RegisterDagRequestProto) returns (.RegisterDagResponseProto); + */ + public abstract void registerDag( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagRequestProto request, + com.google.protobuf.RpcCallback done); + /** * rpc submitWork(.SubmitWorkRequestProto) returns (.SubmitWorkResponseProto); */ @@ -19145,26 +20324,31 @@ public final void callMethod( } switch(method.getIndex()) { case 0: + this.registerDag(controller, (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagRequestProto)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + case 1: this.submitWork(controller, (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 1: + case 2: this.sourceStateUpdated(controller, (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedRequestProto)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 2: + case 3: this.queryComplete(controller, (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteRequestProto)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 3: + case 4: this.terminateFragment(controller, (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 4: + case 5: this.updateFragment(controller, (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UpdateFragmentRequestProto)request, com.google.protobuf.RpcUtil.specializeCallback( done)); @@ -19184,14 +20368,16 @@ public final void callMethod( } switch(method.getIndex()) { case 0: - return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto.getDefaultInstance(); + return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagRequestProto.getDefaultInstance(); case 1: - return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedRequestProto.getDefaultInstance(); + return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto.getDefaultInstance(); case 2: - return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteRequestProto.getDefaultInstance(); + return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedRequestProto.getDefaultInstance(); case 3: - return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto.getDefaultInstance(); + return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteRequestProto.getDefaultInstance(); case 4: + return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto.getDefaultInstance(); + case 5: return 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UpdateFragmentRequestProto.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); @@ -19208,14 +20394,16 @@ public final void callMethod( } switch(method.getIndex()) { case 0: - return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkResponseProto.getDefaultInstance(); + return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagResponseProto.getDefaultInstance(); case 1: - return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedResponseProto.getDefaultInstance(); + return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkResponseProto.getDefaultInstance(); case 2: - return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteResponseProto.getDefaultInstance(); + return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedResponseProto.getDefaultInstance(); case 3: - return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentResponseProto.getDefaultInstance(); + return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteResponseProto.getDefaultInstance(); case 4: + return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentResponseProto.getDefaultInstance(); + case 5: return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UpdateFragmentResponseProto.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); @@ -19238,12 +20426,27 @@ private Stub(com.google.protobuf.RpcChannel channel) { return channel; } + public void registerDag( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagRequestProto request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(0), + controller, + request, + org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagResponseProto.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagResponseProto.class, + org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagResponseProto.getDefaultInstance())); + } + public void submitWork( com.google.protobuf.RpcController controller, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(0), + getDescriptor().getMethods().get(1), controller, request, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkResponseProto.getDefaultInstance(), @@ -19258,7 +20461,7 @@ public void sourceStateUpdated( org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedRequestProto request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(1), + getDescriptor().getMethods().get(2), controller, request, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedResponseProto.getDefaultInstance(), @@ -19273,7 +20476,7 @@ public void queryComplete( org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteRequestProto request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(2), + 
getDescriptor().getMethods().get(3), controller, request, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteResponseProto.getDefaultInstance(), @@ -19288,7 +20491,7 @@ public void terminateFragment( org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(3), + getDescriptor().getMethods().get(4), controller, request, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentResponseProto.getDefaultInstance(), @@ -19303,7 +20506,7 @@ public void updateFragment( org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UpdateFragmentRequestProto request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(4), + getDescriptor().getMethods().get(5), controller, request, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UpdateFragmentResponseProto.getDefaultInstance(), @@ -19320,6 +20523,11 @@ public static BlockingInterface newBlockingStub( } public interface BlockingInterface { + public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagResponseProto registerDag( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagRequestProto request) + throws com.google.protobuf.ServiceException; + public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkResponseProto submitWork( com.google.protobuf.RpcController controller, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto request) @@ -19353,12 +20561,24 @@ private BlockingStub(com.google.protobuf.BlockingRpcChannel channel) { private final com.google.protobuf.BlockingRpcChannel channel; + public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagResponseProto registerDag( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagRequestProto request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagResponseProto) channel.callBlockingMethod( + getDescriptor().getMethods().get(0), + controller, + request, + org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagResponseProto.getDefaultInstance()); + } + + public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkResponseProto submitWork( com.google.protobuf.RpcController controller, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkResponseProto) channel.callBlockingMethod( - getDescriptor().getMethods().get(0), + getDescriptor().getMethods().get(1), controller, request, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkResponseProto.getDefaultInstance()); @@ -19370,7 +20590,7 @@ private BlockingStub(com.google.protobuf.BlockingRpcChannel channel) { org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedRequestProto request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedResponseProto) channel.callBlockingMethod( - getDescriptor().getMethods().get(1), + 
getDescriptor().getMethods().get(2), controller, request, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedResponseProto.getDefaultInstance()); @@ -19382,7 +20602,7 @@ private BlockingStub(com.google.protobuf.BlockingRpcChannel channel) { org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteRequestProto request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteResponseProto) channel.callBlockingMethod( - getDescriptor().getMethods().get(2), + getDescriptor().getMethods().get(3), controller, request, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteResponseProto.getDefaultInstance()); @@ -19394,7 +20614,7 @@ private BlockingStub(com.google.protobuf.BlockingRpcChannel channel) { org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentResponseProto) channel.callBlockingMethod( - getDescriptor().getMethods().get(3), + getDescriptor().getMethods().get(4), controller, request, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentResponseProto.getDefaultInstance()); @@ -19406,7 +20626,7 @@ private BlockingStub(com.google.protobuf.BlockingRpcChannel channel) { org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UpdateFragmentRequestProto request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UpdateFragmentResponseProto) channel.callBlockingMethod( - getDescriptor().getMethods().get(4), + getDescriptor().getMethods().get(5), controller, request, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UpdateFragmentResponseProto.getDefaultInstance()); @@ -19770,6 +20990,16 @@ private BlockingStub(com.google.protobuf.BlockingRpcChannel channel) { private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_SubmitWorkRequestProto_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_RegisterDagRequestProto_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_RegisterDagRequestProto_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_RegisterDagResponseProto_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_RegisterDagResponseProto_fieldAccessorTable; private static com.google.protobuf.Descriptors.Descriptor internal_static_SubmitWorkResponseProto_descriptor; private static @@ -19894,51 +21124,56 @@ private BlockingStub(com.google.protobuf.BlockingRpcChannel channel) { "\t \001(\0132\024.FragmentRuntimeInfo\022\033\n\023initial_e" + "vent_bytes\030\n \001(\014\022\037\n\027initial_event_signat" + "ure\030\013 \001(\014\022\034\n\ris_guaranteed\030\014 \001(\010:\005false\"" + - "b\n\027SubmitWorkResponseProto\022/\n\020submission" + - "_state\030\001 \001(\0162\025.SubmissionStateProto\022\026\n\016u" + - "nique_node_id\030\002 \001(\t\"\205\001\n\036SourceStateUpdat" + - "edRequestProto\022/\n\020query_identifier\030\001 \001(\013" + - "2\025.QueryIdentifierProto\022\020\n\010src_name\030\002 \001(", - "\t\022 \n\005state\030\003 \001(\0162\021.SourceStateProto\"!\n\037S" + - "ourceStateUpdatedResponseProto\"e\n\031QueryC" 
+ - "ompleteRequestProto\022/\n\020query_identifier\030" + - "\001 \001(\0132\025.QueryIdentifierProto\022\027\n\014delete_d" + - "elay\030\002 \001(\003:\0010\"\034\n\032QueryCompleteResponsePr" + - "oto\"t\n\035TerminateFragmentRequestProto\022/\n\020" + - "query_identifier\030\001 \001(\0132\025.QueryIdentifier" + - "Proto\022\"\n\032fragment_identifier_string\030\002 \001(" + - "\t\" \n\036TerminateFragmentResponseProto\"\210\001\n\032" + - "UpdateFragmentRequestProto\022/\n\020query_iden", - "tifier\030\001 \001(\0132\025.QueryIdentifierProto\022\"\n\032f" + - "ragment_identifier_string\030\002 \001(\t\022\025\n\ris_gu" + - "aranteed\030\003 \001(\010\"D\n\033UpdateFragmentResponse" + - "Proto\022\016\n\006result\030\001 \001(\010\022\025\n\ris_guaranteed\030\002" + - " \001(\010\"&\n\024GetTokenRequestProto\022\016\n\006app_id\030\001" + - " \001(\t\"&\n\025GetTokenResponseProto\022\r\n\005token\030\001" + - " \001(\014\"A\n\033LlapOutputSocketInitMessage\022\023\n\013f" + - "ragment_id\030\001 \002(\t\022\r\n\005token\030\002 \001(\014\"\030\n\026Purge" + - "CacheRequestProto\"6\n\027PurgeCacheResponseP" + - "roto\022\033\n\023purged_memory_bytes\030\001 \001(\003*2\n\020Sou", - "rceStateProto\022\017\n\013S_SUCCEEDED\020\001\022\r\n\tS_RUNN" + - "ING\020\002*E\n\024SubmissionStateProto\022\014\n\010ACCEPTE" + - "D\020\001\022\014\n\010REJECTED\020\002\022\021\n\rEVICTED_OTHER\020\0032\233\003\n" + - "\022LlapDaemonProtocol\022?\n\nsubmitWork\022\027.Subm" + - "itWorkRequestProto\032\030.SubmitWorkResponseP" + - "roto\022W\n\022sourceStateUpdated\022\037.SourceState" + - "UpdatedRequestProto\032 .SourceStateUpdated" + - "ResponseProto\022H\n\rqueryComplete\022\032.QueryCo" + - "mpleteRequestProto\032\033.QueryCompleteRespon" + - "seProto\022T\n\021terminateFragment\022\036.Terminate", - "FragmentRequestProto\032\037.TerminateFragment" + - "ResponseProto\022K\n\016updateFragment\022\033.Update" + - "FragmentRequestProto\032\034.UpdateFragmentRes" + - "ponseProto2\236\001\n\026LlapManagementProtocol\022C\n" + - "\022getDelegationToken\022\025.GetTokenRequestPro" + - "to\032\026.GetTokenResponseProto\022?\n\npurgeCache" + - "\022\027.PurgeCacheRequestProto\032\030.PurgeCacheRe" + - "sponseProtoBH\n&org.apache.hadoop.hive.ll" + - "ap.daemon.rpcB\030LlapDaemonProtocolProtos\210" + - "\001\001\240\001\001" + "t\n\027RegisterDagRequestProto\022\014\n\004user\030\001 \001(\t" + + "\022/\n\020query_identifier\030\002 \002(\0132\025.QueryIdenti" + + "fierProto\022\032\n\022credentials_binary\030\003 \001(\014\"\032\n" + + "\030RegisterDagResponseProto\"b\n\027SubmitWorkR" + + "esponseProto\022/\n\020submission_state\030\001 \001(\0162\025", + ".SubmissionStateProto\022\026\n\016unique_node_id\030" + + "\002 \001(\t\"\205\001\n\036SourceStateUpdatedRequestProto" + + "\022/\n\020query_identifier\030\001 \001(\0132\025.QueryIdenti" + + "fierProto\022\020\n\010src_name\030\002 \001(\t\022 \n\005state\030\003 \001" + + "(\0162\021.SourceStateProto\"!\n\037SourceStateUpda" + + "tedResponseProto\"e\n\031QueryCompleteRequest" + + "Proto\022/\n\020query_identifier\030\001 \001(\0132\025.QueryI" + + "dentifierProto\022\027\n\014delete_delay\030\002 \001(\003:\0010\"" + + "\034\n\032QueryCompleteResponseProto\"t\n\035Termina" + + "teFragmentRequestProto\022/\n\020query_identifi", + "er\030\001 \001(\0132\025.QueryIdentifierProto\022\"\n\032fragm" + + "ent_identifier_string\030\002 \001(\t\" \n\036Terminate" + + "FragmentResponseProto\"\210\001\n\032UpdateFragment" + + 
"RequestProto\022/\n\020query_identifier\030\001 \001(\0132\025" + + ".QueryIdentifierProto\022\"\n\032fragment_identi" + + "fier_string\030\002 \001(\t\022\025\n\ris_guaranteed\030\003 \001(\010" + + "\"D\n\033UpdateFragmentResponseProto\022\016\n\006resul" + + "t\030\001 \001(\010\022\025\n\ris_guaranteed\030\002 \001(\010\"&\n\024GetTok" + + "enRequestProto\022\016\n\006app_id\030\001 \001(\t\"&\n\025GetTok" + + "enResponseProto\022\r\n\005token\030\001 \001(\014\"A\n\033LlapOu", + "tputSocketInitMessage\022\023\n\013fragment_id\030\001 \002" + + "(\t\022\r\n\005token\030\002 \001(\014\"\030\n\026PurgeCacheRequestPr" + + "oto\"6\n\027PurgeCacheResponseProto\022\033\n\023purged" + + "_memory_bytes\030\001 \001(\003*2\n\020SourceStateProto\022" + + "\017\n\013S_SUCCEEDED\020\001\022\r\n\tS_RUNNING\020\002*E\n\024Submi" + + "ssionStateProto\022\014\n\010ACCEPTED\020\001\022\014\n\010REJECTE" + + "D\020\002\022\021\n\rEVICTED_OTHER\020\0032\337\003\n\022LlapDaemonPro" + + "tocol\022B\n\013registerDag\022\030.RegisterDagReques" + + "tProto\032\031.RegisterDagResponseProto\022?\n\nsub" + + "mitWork\022\027.SubmitWorkRequestProto\032\030.Submi", + "tWorkResponseProto\022W\n\022sourceStateUpdated" + + "\022\037.SourceStateUpdatedRequestProto\032 .Sour" + + "ceStateUpdatedResponseProto\022H\n\rqueryComp" + + "lete\022\032.QueryCompleteRequestProto\032\033.Query" + + "CompleteResponseProto\022T\n\021terminateFragme" + + "nt\022\036.TerminateFragmentRequestProto\032\037.Ter" + + "minateFragmentResponseProto\022K\n\016updateFra" + + "gment\022\033.UpdateFragmentRequestProto\032\034.Upd" + + "ateFragmentResponseProto2\236\001\n\026LlapManagem" + + "entProtocol\022C\n\022getDelegationToken\022\025.GetT", + "okenRequestProto\032\026.GetTokenResponseProto" + + "\022?\n\npurgeCache\022\027.PurgeCacheRequestProto\032" + + "\030.PurgeCacheResponseProtoBH\n&org.apache." 
+ + "hadoop.hive.llap.daemon.rpcB\030LlapDaemonP" + + "rotocolProtos\210\001\001\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -20005,86 +21240,98 @@ private BlockingStub(com.google.protobuf.BlockingRpcChannel channel) { com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_SubmitWorkRequestProto_descriptor, new java.lang.String[] { "WorkSpec", "WorkSpecSignature", "FragmentNumber", "AttemptNumber", "ContainerIdString", "AmHost", "AmPort", "CredentialsBinary", "FragmentRuntimeInfo", "InitialEventBytes", "InitialEventSignature", "IsGuaranteed", }); - internal_static_SubmitWorkResponseProto_descriptor = + internal_static_RegisterDagRequestProto_descriptor = getDescriptor().getMessageTypes().get(10); + internal_static_RegisterDagRequestProto_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_RegisterDagRequestProto_descriptor, + new java.lang.String[] { "User", "QueryIdentifier", "CredentialsBinary", }); + internal_static_RegisterDagResponseProto_descriptor = + getDescriptor().getMessageTypes().get(11); + internal_static_RegisterDagResponseProto_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_RegisterDagResponseProto_descriptor, + new java.lang.String[] { }); + internal_static_SubmitWorkResponseProto_descriptor = + getDescriptor().getMessageTypes().get(12); internal_static_SubmitWorkResponseProto_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_SubmitWorkResponseProto_descriptor, new java.lang.String[] { "SubmissionState", "UniqueNodeId", }); internal_static_SourceStateUpdatedRequestProto_descriptor = - getDescriptor().getMessageTypes().get(11); + getDescriptor().getMessageTypes().get(13); internal_static_SourceStateUpdatedRequestProto_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_SourceStateUpdatedRequestProto_descriptor, new java.lang.String[] { "QueryIdentifier", "SrcName", "State", }); internal_static_SourceStateUpdatedResponseProto_descriptor = - getDescriptor().getMessageTypes().get(12); + getDescriptor().getMessageTypes().get(14); internal_static_SourceStateUpdatedResponseProto_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_SourceStateUpdatedResponseProto_descriptor, new java.lang.String[] { }); internal_static_QueryCompleteRequestProto_descriptor = - getDescriptor().getMessageTypes().get(13); + getDescriptor().getMessageTypes().get(15); internal_static_QueryCompleteRequestProto_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_QueryCompleteRequestProto_descriptor, new java.lang.String[] { "QueryIdentifier", "DeleteDelay", }); internal_static_QueryCompleteResponseProto_descriptor = - getDescriptor().getMessageTypes().get(14); + getDescriptor().getMessageTypes().get(16); internal_static_QueryCompleteResponseProto_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_QueryCompleteResponseProto_descriptor, new java.lang.String[] { }); internal_static_TerminateFragmentRequestProto_descriptor = - getDescriptor().getMessageTypes().get(15); + getDescriptor().getMessageTypes().get(17); internal_static_TerminateFragmentRequestProto_fieldAccessorTable = new 
com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_TerminateFragmentRequestProto_descriptor, new java.lang.String[] { "QueryIdentifier", "FragmentIdentifierString", }); internal_static_TerminateFragmentResponseProto_descriptor = - getDescriptor().getMessageTypes().get(16); + getDescriptor().getMessageTypes().get(18); internal_static_TerminateFragmentResponseProto_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_TerminateFragmentResponseProto_descriptor, new java.lang.String[] { }); internal_static_UpdateFragmentRequestProto_descriptor = - getDescriptor().getMessageTypes().get(17); + getDescriptor().getMessageTypes().get(19); internal_static_UpdateFragmentRequestProto_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_UpdateFragmentRequestProto_descriptor, new java.lang.String[] { "QueryIdentifier", "FragmentIdentifierString", "IsGuaranteed", }); internal_static_UpdateFragmentResponseProto_descriptor = - getDescriptor().getMessageTypes().get(18); + getDescriptor().getMessageTypes().get(20); internal_static_UpdateFragmentResponseProto_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_UpdateFragmentResponseProto_descriptor, new java.lang.String[] { "Result", "IsGuaranteed", }); internal_static_GetTokenRequestProto_descriptor = - getDescriptor().getMessageTypes().get(19); + getDescriptor().getMessageTypes().get(21); internal_static_GetTokenRequestProto_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_GetTokenRequestProto_descriptor, new java.lang.String[] { "AppId", }); internal_static_GetTokenResponseProto_descriptor = - getDescriptor().getMessageTypes().get(20); + getDescriptor().getMessageTypes().get(22); internal_static_GetTokenResponseProto_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_GetTokenResponseProto_descriptor, new java.lang.String[] { "Token", }); internal_static_LlapOutputSocketInitMessage_descriptor = - getDescriptor().getMessageTypes().get(21); + getDescriptor().getMessageTypes().get(23); internal_static_LlapOutputSocketInitMessage_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_LlapOutputSocketInitMessage_descriptor, new java.lang.String[] { "FragmentId", "Token", }); internal_static_PurgeCacheRequestProto_descriptor = - getDescriptor().getMessageTypes().get(22); + getDescriptor().getMessageTypes().get(24); internal_static_PurgeCacheRequestProto_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_PurgeCacheRequestProto_descriptor, new java.lang.String[] { }); internal_static_PurgeCacheResponseProto_descriptor = - getDescriptor().getMessageTypes().get(23); + getDescriptor().getMessageTypes().get(25); internal_static_PurgeCacheResponseProto_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_PurgeCacheResponseProto_descriptor, diff --git llap-common/src/java/org/apache/hadoop/hive/llap/impl/LlapProtocolClientImpl.java llap-common/src/java/org/apache/hadoop/hive/llap/impl/LlapProtocolClientImpl.java index bdffbbfc22..f7f7d72b3d 100644 --- llap-common/src/java/org/apache/hadoop/hive/llap/impl/LlapProtocolClientImpl.java +++ llap-common/src/java/org/apache/hadoop/hive/llap/impl/LlapProtocolClientImpl.java @@ -23,6 +23,7 @@ import com.google.protobuf.RpcController; import 
com.google.protobuf.ServiceException; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos; import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteRequestProto; import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteResponseProto; import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedRequestProto; @@ -74,6 +75,18 @@ public LlapProtocolClientImpl(Configuration conf, String hostname, int port, } } + @Override + public LlapDaemonProtocolProtos.RegisterDagResponseProto registerDag( + RpcController controller, + LlapDaemonProtocolProtos.RegisterDagRequestProto request) + throws ServiceException { + try { + return getProxy().registerDag(null, request); + } catch (IOException e) { + throw new ServiceException(e); + } + } + @Override public SubmitWorkResponseProto submitWork( RpcController controller, SubmitWorkRequestProto request) throws ServiceException { diff --git llap-common/src/protobuf/LlapDaemonProtocol.proto llap-common/src/protobuf/LlapDaemonProtocol.proto index d70dd41a83..3aeacb2da5 100644 --- llap-common/src/protobuf/LlapDaemonProtocol.proto +++ llap-common/src/protobuf/LlapDaemonProtocol.proto @@ -134,6 +134,14 @@ message SubmitWorkRequestProto { optional bool is_guaranteed = 12 [default = false]; } +message RegisterDagRequestProto { + optional string user = 1; + required QueryIdentifierProto query_identifier = 2; + optional bytes credentials_binary = 3; +} + +message RegisterDagResponseProto { +} enum SubmissionStateProto { ACCEPTED = 1; @@ -204,6 +212,7 @@ message PurgeCacheResponseProto { } service LlapDaemonProtocol { + rpc registerDag(RegisterDagRequestProto) returns (RegisterDagResponseProto); rpc submitWork(SubmitWorkRequestProto) returns (SubmitWorkResponseProto); rpc sourceStateUpdated(SourceStateUpdatedRequestProto) returns (SourceStateUpdatedResponseProto); rpc queryComplete(QueryCompleteRequestProto) returns (QueryCompleteResponseProto); diff --git llap-server/src/java/org/apache/hadoop/hive/llap/daemon/ContainerRunner.java llap-server/src/java/org/apache/hadoop/hive/llap/daemon/ContainerRunner.java index 035960e347..582f518315 100644 --- llap-server/src/java/org/apache/hadoop/hive/llap/daemon/ContainerRunner.java +++ llap-server/src/java/org/apache/hadoop/hive/llap/daemon/ContainerRunner.java @@ -16,6 +16,7 @@ import java.io.IOException; +import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos; import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteRequestProto; import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteResponseProto; import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedRequestProto; @@ -29,6 +30,10 @@ public interface ContainerRunner { + LlapDaemonProtocolProtos.RegisterDagResponseProto registerDag( + LlapDaemonProtocolProtos.RegisterDagRequestProto request) + throws IOException; + SubmitWorkResponseProto submitWork(SubmitWorkRequestProto request) throws IOException; SourceStateUpdatedResponseProto sourceStateUpdated( diff --git llap-server/src/java/org/apache/hadoop/hive/llap/daemon/LlapDaemonUtils.java llap-server/src/java/org/apache/hadoop/hive/llap/daemon/LlapDaemonUtils.java new file mode 100644 index 0000000000..1fbd41b3cc --- /dev/null +++ llap-server/src/java/org/apache/hadoop/hive/llap/daemon/LlapDaemonUtils.java @@ -0,0 +1,17 @@ +package org.apache.hadoop.hive.llap.daemon; + +import 
org.apache.hadoop.io.DataInputBuffer; +import org.apache.hadoop.security.Credentials; + +import java.io.IOException; + +public class LlapDaemonUtils { + public static Credentials credentialsFromByteArray(byte[] binaryCredentials) + throws IOException { + Credentials credentials = new Credentials(); + DataInputBuffer dib = new DataInputBuffer(); + dib.reset(binaryCredentials, binaryCredentials.length); + credentials.readTokenStorageStream(dib); + return credentials; + } +} diff --git llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/ContainerRunnerImpl.java llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/ContainerRunnerImpl.java index ef5922ef41..7b5e1825d0 100644 --- llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/ContainerRunnerImpl.java +++ llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/ContainerRunnerImpl.java @@ -22,13 +22,13 @@ import java.util.List; import java.util.Map; import java.util.Set; +import java.util.concurrent.ExecutorService; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; +import java.util.concurrent.locks.ReadWriteLock; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.common.UgiFactory; -import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.llap.DaemonId; import org.apache.hadoop.hive.llap.LlapNodeId; import org.apache.hadoop.hive.llap.NotTezEventHelper; @@ -39,6 +39,7 @@ import org.apache.hadoop.hive.llap.daemon.FragmentCompletionHandler; import org.apache.hadoop.hive.llap.daemon.HistoryLogger; import org.apache.hadoop.hive.llap.daemon.KilledTaskHandler; +import org.apache.hadoop.hive.llap.daemon.LlapDaemonUtils; import org.apache.hadoop.hive.llap.daemon.QueryFailedHandler; import org.apache.hadoop.hive.llap.daemon.SchedulerFragmentCompletingListener; import org.apache.hadoop.hive.llap.daemon.impl.LlapTokenChecker.LlapTokenInfo; @@ -55,6 +56,8 @@ import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmissionStateProto; import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto; import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkResponseProto; +import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagRequestProto; +import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagResponseProto; import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto; import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentResponseProto; import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UpdateFragmentRequestProto; @@ -62,6 +65,7 @@ import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexOrBinary; import org.apache.hadoop.hive.llap.metrics.LlapDaemonExecutorMetrics; import org.apache.hadoop.hive.llap.security.LlapSignerImpl; +import org.apache.hadoop.hive.llap.shufflehandler.ShuffleHandler; import org.apache.hadoop.hive.llap.tez.Converters; import org.apache.hadoop.hive.llap.tezplugins.LlapTezUtils; import org.apache.hadoop.hive.ql.exec.tez.WorkloadManager; @@ -123,7 +127,8 @@ public ContainerRunnerImpl(Configuration conf, int numExecutors, int waitQueueSi boolean enablePreemption, String[] localDirsBase, AtomicReference localShufflePort, AtomicReference localAddress, long totalMemoryAvailableBytes, LlapDaemonExecutorMetrics metrics, - AMReporter 
amReporter, ClassLoader classLoader, DaemonId daemonId, UgiFactory fsUgiFactory, + AMReporter amReporter, QueryTracker queryTracker, Scheduler executorService, + DaemonId daemonId, UgiFactory fsUgiFactory, SocketFactory socketFactory) { super("ContainerRunnerImpl"); Preconditions.checkState(numExecutors > 0, @@ -138,15 +143,10 @@ public ContainerRunnerImpl(Configuration conf, int numExecutors, int waitQueueSi this.clusterId = daemonId.getClusterString(); this.daemonId = daemonId; - this.queryTracker = new QueryTracker(conf, localDirsBase, clusterId); - addIfService(queryTracker); - String waitQueueSchedulerClassName = HiveConf.getVar( - conf, ConfVars.LLAP_DAEMON_WAIT_QUEUE_COMPARATOR_CLASS_NAME); - this.executorService = new TaskExecutorService(numExecutors, waitQueueSize, - waitQueueSchedulerClassName, enablePreemption, classLoader, metrics, null); + this.queryTracker = queryTracker; + this.executorService = executorService; completionListener = (SchedulerFragmentCompletingListener) executorService; - addIfService(executorService); // Distribute the available memory between the tasks. this.memoryPerExecutor = (long)(totalMemoryAvailableBytes / (float) numExecutors); @@ -186,6 +186,17 @@ protected void serviceStop() throws Exception { super.serviceStop(); } + @Override + public RegisterDagResponseProto registerDag(RegisterDagRequestProto request) + throws IOException { + QueryIdentifierProto identifier = request.getQueryIdentifier(); + queryTracker.registerDag(identifier.getApplicationIdString(), + identifier.getDagIndex(), request.getUser(), + LlapDaemonUtils.credentialsFromByteArray( + request.getCredentialsBinary().toByteArray())); + return RegisterDagResponseProto.newBuilder().build(); + } + @Override public SubmitWorkResponseProto submitWork(SubmitWorkRequestProto request) throws IOException { LlapTokenInfo tokenInfo = null; @@ -242,11 +253,8 @@ public SubmitWorkResponseProto submitWork(SubmitWorkRequestProto request) throws QueryIdentifier queryIdentifier = new QueryIdentifier( qIdProto.getApplicationIdString(), dagIdentifier); - Credentials credentials = new Credentials(); - DataInputBuffer dib = new DataInputBuffer(); - byte[] tokenBytes = request.getCredentialsBinary().toByteArray(); - dib.reset(tokenBytes, tokenBytes.length); - credentials.readTokenStorageStream(dib); + Credentials credentials = LlapDaemonUtils.credentialsFromByteArray( + request.getCredentialsBinary().toByteArray()); Token jobToken = TokenCache.getSessionToken(credentials); diff --git llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemon.java llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemon.java index 52990c5f05..18d39bdf42 100644 --- llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemon.java +++ llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemon.java @@ -45,6 +45,8 @@ import org.apache.hadoop.hive.llap.configuration.LlapDaemonConfiguration; import org.apache.hadoop.hive.llap.daemon.ContainerRunner; import org.apache.hadoop.hive.llap.daemon.QueryFailedHandler; +import org.apache.hadoop.hive.llap.daemon.SchedulerFragmentCompletingListener; +import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos; import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteRequestProto; import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteResponseProto; import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedRequestProto; @@ -291,9 +293,22 @@ public 
LlapDaemon(Configuration daemonConf, int numExecutors, long executorMemor } catch (IOException e) { throw new RuntimeException(e); } + + QueryTracker queryTracker = new QueryTracker(daemonConf, localDirs, + daemonId.getClusterString()); + + String waitQueueSchedulerClassName = HiveConf.getVar( + daemonConf, ConfVars.LLAP_DAEMON_WAIT_QUEUE_COMPARATOR_CLASS_NAME); + + Scheduler executorService = new TaskExecutorService(numExecutors, waitQueueSize, + waitQueueSchedulerClassName, enablePreemption, executorClassLoader, metrics, null); + + addIfService(queryTracker); + addIfService(executorService); + this.containerRunner = new ContainerRunnerImpl(daemonConf, numExecutors, waitQueueSize, enablePreemption, localDirs, this.shufflePort, srvAddress, executorMemoryPerInstance, metrics, - amReporter, executorClassLoader, daemonId, fsUgiFactory, socketFactory); + amReporter, queryTracker, executorService, daemonId, fsUgiFactory, socketFactory); addIfService(containerRunner); // Not adding the registry as a service, since we need to control when it is initialized - conf used to pickup properties. @@ -551,6 +566,13 @@ public static void main(String[] args) throws Exception { } } + @Override + public LlapDaemonProtocolProtos.RegisterDagResponseProto registerDag( + LlapDaemonProtocolProtos.RegisterDagRequestProto request) + throws IOException { + return containerRunner.registerDag(request); + } + @Override public SubmitWorkResponseProto submitWork( SubmitWorkRequestProto request) throws IOException { diff --git llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapProtocolServerImpl.java llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapProtocolServerImpl.java index d856b2580a..c3a95c8b30 100644 --- llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapProtocolServerImpl.java +++ llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapProtocolServerImpl.java @@ -97,6 +97,18 @@ public LlapProtocolServerImpl(SecretManager secretManager, int numHandlers, " with port configured to: " + srvPort); } + @Override + public LlapDaemonProtocolProtos.RegisterDagResponseProto registerDag( + RpcController controller, + LlapDaemonProtocolProtos.RegisterDagRequestProto request) + throws ServiceException { + try { + return containerRunner.registerDag(request); + } catch (IOException e) { + throw new ServiceException(e); + } + } + @Override public SubmitWorkResponseProto submitWork(RpcController controller, SubmitWorkRequestProto request) throws ServiceException { diff --git llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/QueryTracker.java llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/QueryTracker.java index ab84dcc5b3..11d728c07e 100644 --- llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/QueryTracker.java +++ llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/QueryTracker.java @@ -22,8 +22,11 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.llap.LlapNodeId; +import org.apache.hadoop.hive.llap.daemon.LlapDaemonUtils; +import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos; import org.apache.hadoop.hive.llap.log.Log4jQueryCompleteMarker; import org.apache.hadoop.hive.llap.log.LogHelpers; +import org.apache.hadoop.security.Credentials; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.util.StringUtils; @@ -41,6 +44,7 @@ import org.apache.hadoop.hive.llap.shufflehandler.ShuffleHandler; import 
org.apache.hadoop.hive.ql.exec.ObjectCacheFactory; import org.apache.tez.common.security.JobTokenIdentifier; +import org.apache.tez.common.security.TokenCache; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.slf4j.Marker; @@ -202,6 +206,21 @@ QueryFragmentInfo registerFragment(QueryIdentifier queryIdentifier, String appId } } + public void registerDag(String applicationId, int dagId, String user, + Credentials credentials) { + Token jobToken = TokenCache.getSessionToken(credentials); + + QueryIdentifier queryIdentifier = new QueryIdentifier(applicationId, dagId); + ReadWriteLock dagLock = getDagLock(queryIdentifier); + dagLock.readLock().lock(); + try { + ShuffleHandler.get() + .registerDag(applicationId, dagId, jobToken, user, null); + } finally { + dagLock.readLock().unlock(); + } + } + /** * Indicate to the tracker that a fragment is complete. This is from internal execution within the daemon * @param fragmentInfo diff --git llap-server/src/java/org/apache/hadoop/hive/llap/shufflehandler/ShuffleHandler.java llap-server/src/java/org/apache/hadoop/hive/llap/shufflehandler/ShuffleHandler.java index 18a37a2adc..3f9191a621 100644 --- llap-server/src/java/org/apache/hadoop/hive/llap/shufflehandler/ShuffleHandler.java +++ llap-server/src/java/org/apache/hadoop/hive/llap/shufflehandler/ShuffleHandler.java @@ -49,6 +49,7 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.regex.Pattern; +import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Charsets; import com.google.common.base.Preconditions; import com.google.common.cache.CacheBuilder; @@ -169,6 +170,7 @@ /* List of registered applications */ private final ConcurrentMap registeredApps = new ConcurrentHashMap<>(); + private final ConcurrentMap registeredDirectories = new ConcurrentHashMap<>(); /* Maps application identifiers (jobIds) to the associated user for the app */ private final ConcurrentMap userRsrc; private JobTokenSecretManager secretManager; @@ -433,11 +435,14 @@ public int getPort() { * Register an application and it's associated credentials and user information. * * This method and unregisterDag must be synchronized externally to prevent races in shuffle token registration/unregistration + * This method may be called several times; registeredDirectories is only set once, by the + * first call in which appDirs is not null. * * @param applicationIdString * @param dagIdentifier * @param appToken * @param user + * @param appDirs */ public void registerDag(String applicationIdString, int dagIdentifier, Token appToken, @@ -454,6 +459,14 @@ public void registerDag(String applicationIdString, int dagIdentifier, // Don't need to recordShuffleInfo since the out of sync unregister will not remove the // credentials } + + if (appDirs == null) { + return; + } + registeredDagIdentifier = registeredDirectories.put(applicationIdString, dagIdentifier); + if (registeredDagIdentifier != null && !registeredDagIdentifier.equals(dagIdentifier)) { + registeredDirectories.put(applicationIdString, dagIdentifier); + } // First time registration, or new register comes in before the previous unregister. if (registeredDagIdentifier == null || !registeredDagIdentifier.equals(dagIdentifier)) { if (dirWatcher != null) { @@ -487,6 +500,7 @@ public void unregisterDag(String dir, String applicationIdString, int dagIdentif // be synchronized, hence the following check is sufficient.
if (currentDagIdentifier != null && currentDagIdentifier.equals(dagIdentifier)) { registeredApps.remove(applicationIdString); + registeredDirectories.remove(applicationIdString); removeJobShuffleInfo(applicationIdString); } // Unregister for the dirWatcher for the specific dagIdentifier in either case. @@ -514,6 +528,16 @@ protected void stop() throws Exception { } } + @VisibleForTesting + public HashMap getRegisteredApps() { + return new HashMap<>(registeredApps); + } + + @VisibleForTesting + public HashMap getRegisteredDirectories() { + return new HashMap<>(registeredDirectories); + } + protected Shuffle getShuffle(Configuration conf) { return shuffle; } diff --git llap-server/src/test/org/apache/hadoop/hive/llap/daemon/TestLlapDaemonUtils.java llap-server/src/test/org/apache/hadoop/hive/llap/daemon/TestLlapDaemonUtils.java new file mode 100644 index 0000000000..5862f8ea63 --- /dev/null +++ llap-server/src/test/org/apache/hadoop/hive/llap/daemon/TestLlapDaemonUtils.java @@ -0,0 +1,57 @@ +package org.apache.hadoop.hive.llap.daemon; + +import com.google.protobuf.ByteString; +import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos; +import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto; +import org.apache.hadoop.hive.llap.tezplugins.LlapTezUtils; +import org.apache.hadoop.security.Credentials; +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.tez.dag.records.TezDAGID; +import org.apache.tez.dag.records.TezVertexID; + +import java.io.IOException; + +public class TestLlapDaemonUtils { + public static SubmitWorkRequestProto buildSubmitProtoRequest(int fragmentNumber, + String appId, int dagId, int vId, String dagName, + int dagStartTime, int attemptStartTime, int numSelfAndUpstreamTasks, int numSelfAndUpstreamComplete, + int withinDagPriority, Credentials credentials) throws IOException { + return SubmitWorkRequestProto + .newBuilder() + .setAttemptNumber(0) + .setFragmentNumber(fragmentNumber) + .setWorkSpec( + LlapDaemonProtocolProtos.VertexOrBinary.newBuilder().setVertex( + LlapDaemonProtocolProtos.SignableVertexSpec + .newBuilder() + .setQueryIdentifier( + LlapDaemonProtocolProtos.QueryIdentifierProto.newBuilder() + .setApplicationIdString(appId) + .setAppAttemptNumber(0) + .setDagIndex(dagId) + .build()) + .setVertexIndex(vId) + .setDagName(dagName) + .setHiveQueryId(dagName) + .setVertexName("MockVertex") + .setUser("MockUser") + .setTokenIdentifier("MockToken_1") + .setProcessorDescriptor( + LlapDaemonProtocolProtos.EntityDescriptorProto.newBuilder().setClassName("MockProcessor").build()) + .build()).build()) + .setAmHost("localhost") + .setAmPort(12345) + .setCredentialsBinary(ByteString.copyFrom(LlapTezUtils.serializeCredentials(credentials))) + .setContainerIdString("MockContainer_1") + .setFragmentRuntimeInfo(LlapDaemonProtocolProtos + .FragmentRuntimeInfo + .newBuilder() + .setDagStartTime(dagStartTime) + .setFirstAttemptStartTime(attemptStartTime) + .setNumSelfAndUpstreamTasks(numSelfAndUpstreamTasks) + .setNumSelfAndUpstreamCompletedTasks(numSelfAndUpstreamComplete) + .setWithinDagPriority(withinDagPriority) + .build()) + .build(); + } +} diff --git llap-server/src/test/org/apache/hadoop/hive/llap/daemon/impl/TestContainerRunnerImpl.java llap-server/src/test/org/apache/hadoop/hive/llap/daemon/impl/TestContainerRunnerImpl.java new file mode 100644 index 0000000000..12335f1a4e --- /dev/null +++ llap-server/src/test/org/apache/hadoop/hive/llap/daemon/impl/TestContainerRunnerImpl.java @@ -0,0 +1,164 @@ 
+package org.apache.hadoop.hive.llap.daemon.impl; + +import com.google.common.primitives.Ints; +import com.google.protobuf.ByteString; +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.llap.DaemonId; +import org.apache.hadoop.hive.llap.configuration.LlapDaemonConfiguration; +import org.apache.hadoop.hive.llap.daemon.TestLlapDaemonUtils; +import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagRequestProto; +import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto; +import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto; +import org.apache.hadoop.hive.llap.metrics.LlapDaemonExecutorMetrics; +import org.apache.hadoop.hive.llap.metrics.LlapMetricsSystem; +import org.apache.hadoop.hive.llap.metrics.MetricsUtils; +import org.apache.hadoop.hive.llap.security.LlapTokenIdentifier; +import org.apache.hadoop.hive.llap.security.LlapUgiFactoryFactory; +import org.apache.hadoop.hive.llap.shufflehandler.ShuffleHandler; +import org.apache.hadoop.hive.llap.tezplugins.LlapTezUtils; +import org.apache.hadoop.io.Text; +import org.apache.hadoop.net.NetUtils; +import org.apache.hadoop.security.Credentials; +import org.apache.hadoop.security.token.Token; +import org.apache.hadoop.service.AbstractService; +import org.apache.tez.common.security.TokenCache; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +import javax.net.SocketFactory; +import java.io.File; +import java.net.InetSocketAddress; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.atomic.AtomicReference; + +import static org.mockito.Mockito.mock; + +public class TestContainerRunnerImpl { + ContainerRunnerImpl containerRunner; + LlapDaemonConfiguration daemonConf = new LlapDaemonConfiguration(); + private final int numExecutors = 1; + private final int waitQueueSize = HiveConf.getIntVar( + daemonConf, HiveConf.ConfVars.LLAP_DAEMON_TASK_SCHEDULER_WAIT_QUEUE_SIZE); + private final boolean enablePreemption = false; + private final int numLocalDirs = 1; + private final String[] localDirs = new String[numLocalDirs]; + private final File testWorkDir = new File("target", "container-runner-tests"); + private final AtomicReference shufflePort = new AtomicReference<>(); + private final AtomicReference srvAddress = new AtomicReference<>(); + private final int executorMemoryPerInstance = 1024; + private LlapDaemonExecutorMetrics metrics; + private AMReporter amReporter; + private final String testUser = "testUser"; + private final String appId = "application_1540489363818_0021"; + private final int dagId = 1234; + private final int vId = 12345; + private final String hostname = "test.cluster"; + private final DaemonId daemonId = new DaemonId(testUser, + "ContainerTests", hostname, + appId, System.currentTimeMillis()); + private final SocketFactory socketFactory = NetUtils.getDefaultSocketFactory(daemonConf); + private QueryTracker queryTracker; + private TaskExecutorService executorService; + private InetSocketAddress serverSocket; + + + @Before + public void setup() throws Exception { + + String[] strIntervals = HiveConf.getTrimmedStringsVar(daemonConf, + HiveConf.ConfVars.LLAP_DAEMON_TASK_PREEMPTION_METRICS_INTERVALS); + List intervalList = new ArrayList<>(); + if (strIntervals != null) { + for (String strInterval : strIntervals) { + intervalList.add(Integer.valueOf(strInterval)); + } + } + + amReporter = mock(AMReporter.class); + serverSocket = new 
InetSocketAddress("localhost", 0); + srvAddress.set(serverSocket); + + this.metrics = LlapDaemonExecutorMetrics + .create("ContainerRunnerTests", MetricsUtils.getUUID(), numExecutors, + Ints.toArray(intervalList)); + + for (int i = 0; i < numLocalDirs; i++) { + File f = new File(testWorkDir, "localDir"); + f.mkdirs(); + localDirs[i] = f.getAbsolutePath(); + } + String waitQueueSchedulerClassName = HiveConf.getVar( + daemonConf, HiveConf.ConfVars.LLAP_DAEMON_WAIT_QUEUE_COMPARATOR_CLASS_NAME); + + queryTracker = new QueryTracker(daemonConf, localDirs, daemonId.getClusterString()); + executorService = new TaskExecutorService(numExecutors, waitQueueSize, + waitQueueSchedulerClassName, enablePreemption, Thread.currentThread().getContextClassLoader(), metrics, null); + + shufflePort.set(HiveConf.getIntVar( + daemonConf, HiveConf.ConfVars.LLAP_DAEMON_RPC_PORT)); + containerRunner = new ContainerRunnerImpl(daemonConf, numExecutors, waitQueueSize, + enablePreemption, localDirs, this.shufflePort, srvAddress, executorMemoryPerInstance, metrics, + amReporter, queryTracker, executorService, daemonId, LlapUgiFactoryFactory + .createFsUgiFactory(daemonConf), socketFactory); + + ShuffleHandler.initializeAndStart(daemonConf); + + executorService.init(daemonConf); + executorService.start(); + queryTracker.init(daemonConf); + queryTracker.start(); + containerRunner.init(daemonConf); + containerRunner.start(); + } + + @After + public void cleanup() throws Exception { + containerRunner.serviceStop(); + queryTracker.serviceStop(); + executorService.serviceStop(); + LlapMetricsSystem.shutdown(); + } + + @Test(timeout = 10000) + public void testRegisterDag() throws Exception { + Credentials credentials = new Credentials(); + Token sessionToken = new Token<>( + "identifier".getBytes(), "testPassword".getBytes(), new Text("kind"), new Text("service")); + TokenCache.setSessionToken(sessionToken, credentials); + + RegisterDagRequestProto request = RegisterDagRequestProto.newBuilder() + .setUser(testUser) + .setCredentialsBinary(ByteString.copyFrom(LlapTezUtils.serializeCredentials(credentials))) + .setQueryIdentifier( + QueryIdentifierProto.newBuilder() + .setApplicationIdString(appId) + .setDagIndex(dagId) + .build()) + .build(); + containerRunner.registerDag(request); + Assert.assertEquals(ShuffleHandler.get().getRegisteredApps().size(), 1); + Assert.assertEquals((long)ShuffleHandler.get().getRegisteredApps().get(appId), dagId); + Assert.assertEquals(ShuffleHandler.get().getRegisteredDirectories().size(), 0); + + containerRunner.registerDag(request); + Assert.assertEquals(ShuffleHandler.get().getRegisteredApps().size(), 1); + Assert.assertEquals((long)ShuffleHandler.get().getRegisteredApps().get(appId), dagId); + Assert.assertEquals(ShuffleHandler.get().getRegisteredDirectories().size(), 0); + + SubmitWorkRequestProto sRequest = + TestLlapDaemonUtils.buildSubmitProtoRequest(1, appId, + dagId, vId, "dagName", 0, 0, + 0, 0, 1, + credentials); + + containerRunner.submitWork(sRequest); + Assert.assertEquals(ShuffleHandler.get().getRegisteredApps().size(), 1); + Assert.assertEquals((long)ShuffleHandler.get().getRegisteredApps().get(appId), dagId); + Assert.assertEquals(ShuffleHandler.get().getRegisteredDirectories().size(), 1); + Assert.assertEquals((long)ShuffleHandler.get().getRegisteredDirectories().get(appId), dagId); + } +} diff --git llap-server/src/test/org/apache/hadoop/hive/llap/daemon/impl/comparator/TestFirstInFirstOutComparator.java
llap-server/src/test/org/apache/hadoop/hive/llap/daemon/impl/comparator/TestFirstInFirstOutComparator.java index d3aa53942b..29cb1ee13e 100644 --- llap-server/src/test/org/apache/hadoop/hive/llap/daemon/impl/comparator/TestFirstInFirstOutComparator.java +++ llap-server/src/test/org/apache/hadoop/hive/llap/daemon/impl/comparator/TestFirstInFirstOutComparator.java @@ -22,6 +22,7 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNull; +import org.apache.hadoop.hive.llap.daemon.TestLlapDaemonUtils; import org.apache.hadoop.hive.llap.daemon.impl.EvictingPriorityBlockingQueue; import org.apache.hadoop.hive.llap.daemon.impl.TaskExecutorService.TaskWrapper; import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos; @@ -30,22 +31,25 @@ import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SignableVertexSpec; import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto; import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexOrBinary; +import org.apache.hadoop.security.Credentials; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.tez.dag.records.TezDAGID; import org.apache.tez.dag.records.TezVertexID; import org.junit.Test; +import java.io.IOException; + public class TestFirstInFirstOutComparator { private SubmitWorkRequestProto createRequest(int fragmentNumber, int numSelfAndUpstreamTasks, int dagStartTime, - int attemptStartTime) { + int attemptStartTime) throws IOException { // Same priority for all tasks. return createRequest(fragmentNumber, numSelfAndUpstreamTasks, 0, dagStartTime, attemptStartTime, 1); } private SubmitWorkRequestProto createRequest(int fragmentNumber, int numSelfAndUpstreamTasks, int numSelfAndUpstreamComplete, int dagStartTime, - int attemptStartTime, int withinDagPriority) { + int attemptStartTime, int withinDagPriority) throws IOException { return createRequest(fragmentNumber, numSelfAndUpstreamTasks, numSelfAndUpstreamComplete, dagStartTime, attemptStartTime, withinDagPriority, "MockDag"); } @@ -54,50 +58,18 @@ private SubmitWorkRequestProto createRequest(int fragmentNumber, int numSelfAndU private SubmitWorkRequestProto createRequest(int fragmentNumber, int numSelfAndUpstreamTasks, int numSelfAndUpstreamComplete, int dagStartTime, int attemptStartTime, int withinDagPriority, - String dagName) { + String dagName) throws IOException { ApplicationId appId = ApplicationId.newInstance(9999, 72); TezDAGID dagId = TezDAGID.getInstance(appId, 1); TezVertexID vId = TezVertexID.getInstance(dagId, 35); - return SubmitWorkRequestProto - .newBuilder() - .setAttemptNumber(0) - .setFragmentNumber(fragmentNumber) - .setWorkSpec( - VertexOrBinary.newBuilder().setVertex( - SignableVertexSpec - .newBuilder() - .setQueryIdentifier( - QueryIdentifierProto.newBuilder() - .setApplicationIdString(appId.toString()) - .setAppAttemptNumber(0) - .setDagIndex(dagId.getId()) - .build()) - .setVertexIndex(vId.getId()) - .setDagName(dagName) - .setHiveQueryId(dagName) - .setVertexName("MockVertex") - .setUser("MockUser") - .setTokenIdentifier("MockToken_1") - .setProcessorDescriptor( - EntityDescriptorProto.newBuilder().setClassName("MockProcessor").build()) - .build()).build()) - .setAmHost("localhost") - .setAmPort(12345) - .setContainerIdString("MockContainer_1") - .setFragmentRuntimeInfo(LlapDaemonProtocolProtos - .FragmentRuntimeInfo - .newBuilder() - .setDagStartTime(dagStartTime) - .setFirstAttemptStartTime(attemptStartTime) - 
.setNumSelfAndUpstreamTasks(numSelfAndUpstreamTasks) - .setNumSelfAndUpstreamCompletedTasks(numSelfAndUpstreamComplete) - .setWithinDagPriority(withinDagPriority) - .build()) - .build(); + return TestLlapDaemonUtils.buildSubmitProtoRequest(fragmentNumber, appId.toString(), + dagId.getId(), vId.getId() , dagName, dagStartTime, attemptStartTime, + numSelfAndUpstreamTasks, numSelfAndUpstreamComplete, withinDagPriority, + new Credentials()); } @Test (timeout = 60000) - public void testWaitQueueComparator() throws InterruptedException { + public void testWaitQueueComparator() throws InterruptedException, IOException { TaskWrapper r1 = createTaskWrapper(createRequest(1, 2, 5, 100), false, 100000); TaskWrapper r2 = createTaskWrapper(createRequest(2, 4, 4, 200), false, 100000); TaskWrapper r3 = createTaskWrapper(createRequest(3, 6, 3, 300), false, 1000000); @@ -273,7 +245,7 @@ public void testWaitQueueComparatorCanFinish() throws InterruptedException { } @Test(timeout = 60000) - public void testWaitQueueComparatorWithinDagPriority() throws InterruptedException { + public void testWaitQueueComparatorWithinDagPriority() throws InterruptedException, IOException { TaskWrapper r1 = createTaskWrapper(createRequest(1, 1, 0, 100, 100, 10), false, 100000); TaskWrapper r2 = createTaskWrapper(createRequest(2, 1, 0, 100, 100, 1), false, 100000); TaskWrapper r3 = createTaskWrapper(createRequest(3, 1, 0, 100, 100, 5), false, 100000); @@ -291,7 +263,7 @@ public void testWaitQueueComparatorWithinDagPriority() throws InterruptedExcepti } @Test(timeout = 60000) - public void testWaitQueueComparatorWithinSameDagPriority() throws InterruptedException { + public void testWaitQueueComparatorWithinSameDagPriority() throws InterruptedException, IOException { TaskWrapper r1 = createTaskWrapper(createRequest(1, 1, 0, 10, 100, 10), true, 100000); TaskWrapper r2 = createTaskWrapper(createRequest(2, 1, 0, 10, 100, 10), true, 100000); TaskWrapper r3 = createTaskWrapper(createRequest(3, 1, 0, 10, 100, 10), true, 100000); @@ -309,7 +281,7 @@ public void testWaitQueueComparatorWithinSameDagPriority() throws InterruptedExc } @Test(timeout = 60000) - public void testWaitQueueComparatorParallelism() throws InterruptedException { + public void testWaitQueueComparatorParallelism() throws InterruptedException, IOException { TaskWrapper r1 = createTaskWrapper(createRequest(1, 10, 3, 100, 100, 1, "q1"), false, 100000); TaskWrapper r2 = createTaskWrapper(createRequest(2, 10, 7, 100, 100, 1, "q2"), false, 100000); TaskWrapper r3 = createTaskWrapper(createRequest(3, 10, 5, 100, 100, 1, "q3"), false, 100000); diff --git llap-tez/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapTaskCommunicator.java llap-tez/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapTaskCommunicator.java index 5d4ce223d9..cffbffd8a1 100644 --- llap-tez/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapTaskCommunicator.java +++ llap-tez/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapTaskCommunicator.java @@ -14,6 +14,7 @@ package org.apache.hadoop.hive.llap.tezplugins; +import com.google.common.collect.Lists; import org.apache.hadoop.hive.llap.tezplugins.LlapTaskSchedulerService.NodeInfo; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.hive.llap.registry.LlapServiceInstance; @@ -25,6 +26,7 @@ import java.nio.ByteBuffer; import java.util.HashMap; import java.util.HashSet; +import java.util.List; import java.util.Map; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; @@ -52,6 +54,8 @@ import 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentRuntimeInfo; import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteRequestProto; import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto; +import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagRequestProto; +import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagResponseProto; import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedRequestProto; import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedResponseProto; import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto; @@ -306,6 +310,38 @@ public void registerContainerEnd(ContainerId containerId, ContainerEndReason end void setError(CtxType ctx, Throwable t); } + public void registerDag(NodeInfo node, final OperationCallback callback) { + RegisterDagRequestProto.Builder builder = RegisterDagRequestProto.newBuilder(); + try { + RegisterDagRequestProto request = builder + .setQueryIdentifier(currentQueryIdentifierProto) + .setUser(user) + .setCredentialsBinary( + getCredentials(getContext() + .getCurrentDagInfo().getCredentials())).build(); + communicator.registerDag(request, node.getHost(), node.getRpcPort(), + new LlapProtocolClientProxy.ExecuteRequestCallback() { + @Override + public void setResponse(RegisterDagResponseProto response) { + callback.setDone(null, currentQueryIdentifierProto); + } + + @Override + public void indicateError(Throwable t) { + LOG.info("Error registering dag with" + + " appId=" + currentQueryIdentifierProto.getApplicationIdString() + + " dagId=" + currentQueryIdentifierProto.getDagIndex() + + " to node " + node.getHost()); + if (!processSendError(t)) { + callback.setError(null, t); + } + } + }); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + public void startUpdateGuaranteed(TezTaskAttemptID attemptId, NodeInfo assignedNode, boolean newState, final OperationCallback callback, final T ctx) { LlapNodeId nodeId = entityTracker.getNodeIdForTaskAttempt(attemptId); @@ -795,14 +831,10 @@ private SubmitWorkRequestProto constructSubmitWorkRequest(ContainerId containerI Preconditions.checkState(currentQueryIdentifierProto.getDagIndex() == taskSpec.getTaskAttemptID().getTaskID().getVertexID().getDAGId().getId()); - ByteBuffer credentialsBinary = credentialMap.get(currentQueryIdentifierProto); - if (credentialsBinary == null) { - credentialsBinary = serializeCredentials(getContext().getCurrentDagInfo().getCredentials()); - credentialMap.putIfAbsent(currentQueryIdentifierProto, credentialsBinary.duplicate()); - } else { - credentialsBinary = credentialsBinary.duplicate(); - } - builder.setCredentialsBinary(ByteString.copyFrom(credentialsBinary)); + + + builder.setCredentialsBinary( + getCredentials(getContext().getCurrentDagInfo().getCredentials())); builder.setWorkSpec(VertexOrBinary.newBuilder().setVertex(Converters.constructSignableVertexSpec( taskSpec, currentQueryIdentifierProto, getTokenIdentifier(), user, hiveQueryId)).build()); // Don't call builder.setWorkSpecSignature() - Tez doesn't sign fragments @@ -814,16 +846,17 @@ private SubmitWorkRequestProto constructSubmitWorkRequest(ContainerId containerI return builder.build(); } - private ByteBuffer serializeCredentials(Credentials credentials) throws IOException { - Credentials containerCredentials = new Credentials(); - 
containerCredentials.addAll(credentials); - DataOutputBuffer containerTokens_dob = new DataOutputBuffer(); - containerCredentials.writeTokenStorageToStream(containerTokens_dob); - return ByteBuffer.wrap(containerTokens_dob.getData(), 0, containerTokens_dob.getLength()); + private ByteString getCredentials(Credentials credentials) throws IOException { + ByteBuffer credentialsBinary = credentialMap.get(currentQueryIdentifierProto); + if (credentialsBinary == null) { + credentialsBinary = LlapTezUtils.serializeCredentials(credentials); + credentialMap.putIfAbsent(currentQueryIdentifierProto, credentialsBinary.duplicate()); + } else { + credentialsBinary = credentialsBinary.duplicate(); + } + return ByteString.copyFrom(credentialsBinary); } - - protected class LlapTaskUmbilicalProtocolImpl implements LlapTaskUmbilicalProtocol { private final TezTaskUmbilicalProtocol tezUmbilical; diff --git llap-tez/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapTaskSchedulerService.java llap-tez/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapTaskSchedulerService.java index 7e8299d156..54f8bf42e1 100644 --- llap-tez/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapTaskSchedulerService.java +++ llap-tez/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapTaskSchedulerService.java @@ -16,7 +16,6 @@ import com.google.common.io.ByteArrayDataOutput; -import org.apache.hadoop.hive.registry.ServiceInstanceSet; import org.apache.hadoop.io.Text; import org.apache.hadoop.yarn.api.records.ApplicationId; @@ -71,6 +70,7 @@ import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.llap.metrics.LlapMetricsSystem; import org.apache.hadoop.hive.llap.metrics.MetricsUtils; +import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto; import org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryRequestProto; import org.apache.hadoop.hive.llap.registry.LlapServiceInstance; import org.apache.hadoop.hive.llap.registry.LlapServiceInstanceSet; @@ -81,12 +81,8 @@ import org.apache.hadoop.hive.llap.tezplugins.helpers.MonotonicClock; import org.apache.hadoop.hive.llap.tezplugins.metrics.LlapTaskSchedulerMetrics; import org.apache.hadoop.hive.llap.tezplugins.scheduler.LoggingFutureCallback; -import org.apache.hadoop.hive.registry.ServiceInstanceStateChangeListener; -import org.apache.hadoop.hive.registry.impl.TezAmRegistryImpl; -import org.apache.hadoop.io.Text; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; -import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.Container; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.ContainerState; @@ -121,7 +117,6 @@ import com.google.common.base.Preconditions; import com.google.common.collect.Maps; import com.google.common.collect.Sets; -import com.google.common.io.ByteArrayDataOutput; import com.google.common.io.ByteStreams; import com.google.common.util.concurrent.Futures; import com.google.common.util.concurrent.ListenableFuture; @@ -133,6 +128,7 @@ private static final Logger LOG = LoggerFactory.getLogger(LlapTaskSchedulerService.class); private static final Logger WM_LOG = LoggerFactory.getLogger("GuaranteedTasks"); private static final TaskStartComparator TASK_INFO_COMPARATOR = new TaskStartComparator(); + private final static Comparator PRIORITY_COMPARATOR = new Comparator() { @Override public int compare(Priority o1, Priority o2) { @@ -154,6 +150,31 @@ 
     public void setError(TaskInfo ctx, Throwable t) {
     }
   }
+  private final class RegisterDagCallback implements OperationCallback<QueryIdentifierProto, Void> {
+    private final LlapServiceInstance llapServiceInstance;
+    private final NodeInfo nodeInfo;
+    RegisterDagCallback(NodeInfo nodeInfo, LlapServiceInstance llapServiceInstance) {
+      this.nodeInfo = nodeInfo;
+      this.llapServiceInstance = llapServiceInstance;
+    }
+    @Override
+    public void setDone(Void v, QueryIdentifierProto result) {
+      LOG.info("Dag with"
+          + " appId=" + result.getApplicationIdString()
+          + " dagId=" + result.getDagIndex()
+          + " registered successfully for node " + nodeInfo.getHost());
+      addNode(nodeInfo, llapServiceInstance);
+    }
+
+    @Override
+    public void setError(Void v, Throwable t) {
+      LOG.warn("Error registering dag for node " + nodeInfo.getHost(), t);
+      // In case we fail to register the dag we add the node anyway.
+      // We will try to register the dag when we schedule the first container.
+      addNode(nodeInfo, llapServiceInstance);
+    }
+  }
+
   // TODO: this is an ugly hack; see the same in LlapTaskCommunicator for discussion.
   // This only lives for the duration of the service init.
   static LlapTaskSchedulerService instance = null;
@@ -761,7 +782,7 @@ public void run() {
       registry.registerStateChangeListener(new NodeStateChangeListener());
       activeInstances = registry.getInstances();
       for (LlapServiceInstance inst : activeInstances.getAll()) {
-        addNode(new NodeInfo(inst, nodeBlacklistConf, clock,
+        registerAndAddNode(new NodeInfo(inst, nodeBlacklistConf, clock,
             numSchedulableTasksPerNode, metrics), inst);
       }
       if (amRegistry != null) {
@@ -788,7 +809,7 @@ protected void setServiceInstanceSet(LlapServiceInstanceSet serviceInstanceSet)
     public void onCreate(LlapServiceInstance serviceInstance, int ephSeqVersion) {
       LOG.info("Added node with identity: {} as a result of registry callback",
           serviceInstance.getWorkerIdentity());
-      addNode(new NodeInfo(serviceInstance, nodeBlacklistConf, clock,
+      registerAndAddNode(new NodeInfo(serviceInstance, nodeBlacklistConf, clock,
          numSchedulableTasksPerNode, metrics), serviceInstance);
     }
@@ -1510,6 +1531,10 @@ private SelectHostResult randomSelection(final List nodesWithFreeSlots
     return new SelectHostResult(randomNode);
   }

+  private void registerAndAddNode(NodeInfo node, LlapServiceInstance serviceInstance) {
+    communicator.registerDag(node, new RegisterDagCallback(node, serviceInstance));
+  }
+
   private void addNode(NodeInfo node, LlapServiceInstance serviceInstance) {
     // we have just added a new node. Signal timeout monitor to reset timer
     if (activeInstances.size() != 0 && timeoutFutureRef.get() != null) {
diff --git llap-tez/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapTezUtils.java llap-tez/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapTezUtils.java
index e4af660fff..016d395108 100644
--- llap-tez/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapTezUtils.java
+++ llap-tez/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapTezUtils.java
@@ -15,13 +15,18 @@
 package org.apache.hadoop.hive.llap.tezplugins;

 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.io.DataOutputBuffer;
 import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.security.Credentials;
 import org.apache.tez.dag.records.TezTaskAttemptID;
 import org.apache.tez.mapreduce.hadoop.MRInputHelpers;
 import org.apache.tez.mapreduce.input.MRInput;
 import org.apache.tez.mapreduce.input.MRInputLegacy;
 import org.apache.tez.mapreduce.input.MultiMRInput;

+import java.io.IOException;
+import java.nio.ByteBuffer;
+
 @InterfaceAudience.Private
 public class LlapTezUtils {
   public static boolean isSourceOfInterest(String inputClassName) {
@@ -48,4 +53,13 @@ public static String stripAttemptPrefix(final String s) {
     }
     return s;
   }
+
+  public static ByteBuffer serializeCredentials(Credentials credentials) throws
+      IOException {
+    Credentials containerCredentials = new Credentials();
+    containerCredentials.addAll(credentials);
+    DataOutputBuffer containerTokens_dob = new DataOutputBuffer();
+    containerCredentials.writeTokenStorageToStream(containerTokens_dob);
+    return ByteBuffer.wrap(containerTokens_dob.getData(), 0, containerTokens_dob.getLength());
+  }
 }