diff --git llap-common/src/gen/protobuf/gen-java/org/apache/hadoop/hive/llap/daemon/rpc/LlapDaemonProtocolProtos.java llap-common/src/gen/protobuf/gen-java/org/apache/hadoop/hive/llap/daemon/rpc/LlapDaemonProtocolProtos.java
index 493a2e1669..cb383ed5f8 100644
--- llap-common/src/gen/protobuf/gen-java/org/apache/hadoop/hive/llap/daemon/rpc/LlapDaemonProtocolProtos.java
+++ llap-common/src/gen/protobuf/gen-java/org/apache/hadoop/hive/llap/daemon/rpc/LlapDaemonProtocolProtos.java
@@ -20196,7 +20196,7 @@ public boolean hasKey() {
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
- com.google.protobuf.ByteString bs =
+ com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
@@ -20212,7 +20212,7 @@ public boolean hasKey() {
getKeyBytes() {
java.lang.Object ref = key_;
if (ref instanceof java.lang.String) {
- com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
key_ = b;
@@ -20560,7 +20560,7 @@ public boolean hasKey() {
getKeyBytes() {
java.lang.Object ref = key_;
if (ref instanceof String) {
- com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
key_ = b;
@@ -20994,7 +20994,7 @@ public Builder mergeFrom(
/**
* repeated .MapEntry metrics = 1;
*/
- java.util.List<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntry>
+ java.util.List<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntry>
getMetricsList();
/**
* repeated .MapEntry metrics = 1;
@@ -21007,7 +21007,7 @@ public Builder mergeFrom(
/**
* repeated .MapEntry metrics = 1;
*/
- java.util.List<? extends org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntryOrBuilder>
+ java.util.List<? extends org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntryOrBuilder>
getMetricsOrBuilderList();
/**
* repeated .MapEntry metrics = 1;
@@ -21128,7 +21128,7 @@ public GetDaemonMetricsResponseProto parsePartialFrom(
/**
* repeated .MapEntry metrics = 1;
*/
- public java.util.List<? extends org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntryOrBuilder>
+ public java.util.List<? extends org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntryOrBuilder>
getMetricsOrBuilderList() {
return metrics_;
}
@@ -21410,7 +21410,7 @@ public Builder mergeFrom(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtoc
metricsBuilder_ = null;
metrics_ = other.metrics_;
bitField0_ = (bitField0_ & ~0x00000001);
- metricsBuilder_ =
+ metricsBuilder_ =
com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
getMetricsFieldBuilder() : null;
} else {
@@ -21640,7 +21640,7 @@ public Builder removeMetrics(int index) {
/**
* repeated .MapEntry metrics = 1;
*/
- public java.util.List<? extends org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntryOrBuilder>
+ public java.util.List<? extends org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntryOrBuilder>
getMetricsOrBuilderList() {
if (metricsBuilder_ != null) {
return metricsBuilder_.getMessageOrBuilderList();
@@ -21666,12 +21666,12 @@ public Builder removeMetrics(int index) {
/**
* repeated .MapEntry metrics = 1;
*/
- public java.util.List<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntry.Builder>
+ public java.util.List<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntry.Builder>
getMetricsBuilderList() {
return getMetricsFieldBuilder().getBuilderList();
}
private com.google.protobuf.RepeatedFieldBuilder<
- org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntry, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntry.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntryOrBuilder>
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntry, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntry.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntryOrBuilder>
getMetricsFieldBuilder() {
if (metricsBuilder_ == null) {
metricsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
@@ -21696,137 +21696,999 @@ public Builder removeMetrics(int index) {
// @@protoc_insertion_point(class_scope:GetDaemonMetricsResponseProto)
}
+ public interface SetCapacityRequestProtoOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // optional int32 executorNum = 1;
+ /**
+ * optional int32 executorNum = 1;
+ */
+ boolean hasExecutorNum();
+ /**
+ * optional int32 executorNum = 1;
+ */
+ int getExecutorNum();
+
+ // optional int32 queueSize = 2;
+ /**
+ * optional int32 queueSize = 2;
+ */
+ boolean hasQueueSize();
+ /**
+ * optional int32 queueSize = 2;
+ */
+ int getQueueSize();
+ }
/**
- * Protobuf service {@code LlapDaemonProtocol}
+ * Protobuf type {@code SetCapacityRequestProto}
*/
- public static abstract class LlapDaemonProtocol
- implements com.google.protobuf.Service {
- protected LlapDaemonProtocol() {}
+ public static final class SetCapacityRequestProto extends
+ com.google.protobuf.GeneratedMessage
+ implements SetCapacityRequestProtoOrBuilder {
+ // Use SetCapacityRequestProto.newBuilder() to construct.
+ private SetCapacityRequestProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private SetCapacityRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
- public interface Interface {
- /**
- * rpc registerDag(.RegisterDagRequestProto) returns (.RegisterDagResponseProto);
- */
- public abstract void registerDag(
- com.google.protobuf.RpcController controller,
- org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagRequestProto request,
- com.google.protobuf.RpcCallback<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagResponseProto> done);
+ private static final SetCapacityRequestProto defaultInstance;
+ public static SetCapacityRequestProto getDefaultInstance() {
+ return defaultInstance;
+ }
- /**
- * rpc submitWork(.SubmitWorkRequestProto) returns (.SubmitWorkResponseProto);
- */
- public abstract void submitWork(
- com.google.protobuf.RpcController controller,
- org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto request,
- com.google.protobuf.RpcCallback<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkResponseProto> done);
+ public SetCapacityRequestProto getDefaultInstanceForType() {
+ return defaultInstance;
+ }
- /**
- * rpc sourceStateUpdated(.SourceStateUpdatedRequestProto) returns (.SourceStateUpdatedResponseProto);
- */
- public abstract void sourceStateUpdated(
- com.google.protobuf.RpcController controller,
- org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedRequestProto request,
- com.google.protobuf.RpcCallback<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedResponseProto> done);
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private SetCapacityRequestProto(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 8: {
+ bitField0_ |= 0x00000001;
+ executorNum_ = input.readInt32();
+ break;
+ }
+ case 16: {
+ bitField0_ |= 0x00000002;
+ queueSize_ = input.readInt32();
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_SetCapacityRequestProto_descriptor;
+ }
- /**
- * rpc queryComplete(.QueryCompleteRequestProto) returns (.QueryCompleteResponseProto);
- */
- public abstract void queryComplete(
- com.google.protobuf.RpcController controller,
- org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteRequestProto request,
- com.google.protobuf.RpcCallback<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteResponseProto> done);
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_SetCapacityRequestProto_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityRequestProto.class, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityRequestProto.Builder.class);
+ }
- /**
- * rpc terminateFragment(.TerminateFragmentRequestProto) returns (.TerminateFragmentResponseProto);
- */
- public abstract void terminateFragment(
- com.google.protobuf.RpcController controller,
- org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto request,
- com.google.protobuf.RpcCallback<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentResponseProto> done);
+ public static com.google.protobuf.Parser<SetCapacityRequestProto> PARSER =
+ new com.google.protobuf.AbstractParser<SetCapacityRequestProto>() {
+ public SetCapacityRequestProto parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new SetCapacityRequestProto(input, extensionRegistry);
+ }
+ };
- /**
- * rpc updateFragment(.UpdateFragmentRequestProto) returns (.UpdateFragmentResponseProto);
- */
- public abstract void updateFragment(
- com.google.protobuf.RpcController controller,
- org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UpdateFragmentRequestProto request,
- com.google.protobuf.RpcCallback<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UpdateFragmentResponseProto> done);
+ @java.lang.Override
+ public com.google.protobuf.Parser<SetCapacityRequestProto> getParserForType() {
+ return PARSER;
+ }
+ private int bitField0_;
+ // optional int32 executorNum = 1;
+ public static final int EXECUTORNUM_FIELD_NUMBER = 1;
+ private int executorNum_;
+ /**
+ * optional int32 executorNum = 1;
+ */
+ public boolean hasExecutorNum() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * optional int32 executorNum = 1;
+ */
+ public int getExecutorNum() {
+ return executorNum_;
}
- public static com.google.protobuf.Service newReflectiveService(
- final Interface impl) {
- return new LlapDaemonProtocol() {
- @java.lang.Override
- public void registerDag(
- com.google.protobuf.RpcController controller,
- org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagRequestProto request,
- com.google.protobuf.RpcCallback<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagResponseProto> done) {
- impl.registerDag(controller, request, done);
- }
+ // optional int32 queueSize = 2;
+ public static final int QUEUESIZE_FIELD_NUMBER = 2;
+ private int queueSize_;
+ /**
+ * optional int32 queueSize = 2;
+ */
+ public boolean hasQueueSize() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * optional int32 queueSize = 2;
+ */
+ public int getQueueSize() {
+ return queueSize_;
+ }
- @java.lang.Override
- public void submitWork(
- com.google.protobuf.RpcController controller,
- org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto request,
- com.google.protobuf.RpcCallback<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkResponseProto> done) {
- impl.submitWork(controller, request, done);
- }
+ private void initFields() {
+ executorNum_ = 0;
+ queueSize_ = 0;
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
- @java.lang.Override
- public void sourceStateUpdated(
- com.google.protobuf.RpcController controller,
- org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedRequestProto request,
- com.google.protobuf.RpcCallback<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedResponseProto> done) {
- impl.sourceStateUpdated(controller, request, done);
- }
+ memoizedIsInitialized = 1;
+ return true;
+ }
- @java.lang.Override
- public void queryComplete(
- com.google.protobuf.RpcController controller,
- org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteRequestProto request,
- com.google.protobuf.RpcCallback<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteResponseProto> done) {
- impl.queryComplete(controller, request, done);
- }
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeInt32(1, executorNum_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeInt32(2, queueSize_);
+ }
+ getUnknownFields().writeTo(output);
+ }
- @java.lang.Override
- public void terminateFragment(
- com.google.protobuf.RpcController controller,
- org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto request,
- com.google.protobuf.RpcCallback<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentResponseProto> done) {
- impl.terminateFragment(controller, request, done);
- }
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
- @java.lang.Override
- public void updateFragment(
- com.google.protobuf.RpcController controller,
- org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UpdateFragmentRequestProto request,
- com.google.protobuf.RpcCallback<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UpdateFragmentResponseProto> done) {
- impl.updateFragment(controller, request, done);
- }
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeInt32Size(1, executorNum_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeInt32Size(2, queueSize_);
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
- };
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
}
- public static com.google.protobuf.BlockingService
- newReflectiveBlockingService(final BlockingInterface impl) {
- return new com.google.protobuf.BlockingService() {
- public final com.google.protobuf.Descriptors.ServiceDescriptor
- getDescriptorForType() {
- return getDescriptor();
- }
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityRequestProto)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityRequestProto other = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityRequestProto) obj;
- public final com.google.protobuf.Message callBlockingMethod(
- com.google.protobuf.Descriptors.MethodDescriptor method,
- com.google.protobuf.RpcController controller,
- com.google.protobuf.Message request)
- throws com.google.protobuf.ServiceException {
- if (method.getService() != getDescriptor()) {
- throw new java.lang.IllegalArgumentException(
- "Service.callBlockingMethod() given method descriptor for " +
- "wrong service type.");
- }
- switch(method.getIndex()) {
+ boolean result = true;
+ result = result && (hasExecutorNum() == other.hasExecutorNum());
+ if (hasExecutorNum()) {
+ result = result && (getExecutorNum()
+ == other.getExecutorNum());
+ }
+ result = result && (hasQueueSize() == other.hasQueueSize());
+ if (hasQueueSize()) {
+ result = result && (getQueueSize()
+ == other.getQueueSize());
+ }
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ private int memoizedHashCode = 0;
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (hasExecutorNum()) {
+ hash = (37 * hash) + EXECUTORNUM_FIELD_NUMBER;
+ hash = (53 * hash) + getExecutorNum();
+ }
+ if (hasQueueSize()) {
+ hash = (37 * hash) + QUEUESIZE_FIELD_NUMBER;
+ hash = (53 * hash) + getQueueSize();
+ }
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityRequestProto parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityRequestProto parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityRequestProto parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityRequestProto parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityRequestProto parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityRequestProto parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityRequestProto parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityRequestProto parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityRequestProto parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityRequestProto parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityRequestProto prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code SetCapacityRequestProto}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityRequestProtoOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_SetCapacityRequestProto_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_SetCapacityRequestProto_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityRequestProto.class, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityRequestProto.Builder.class);
+ }
+
+ // Construct using org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityRequestProto.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ executorNum_ = 0;
+ bitField0_ = (bitField0_ & ~0x00000001);
+ queueSize_ = 0;
+ bitField0_ = (bitField0_ & ~0x00000002);
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_SetCapacityRequestProto_descriptor;
+ }
+
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityRequestProto getDefaultInstanceForType() {
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityRequestProto.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityRequestProto build() {
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityRequestProto result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityRequestProto buildPartial() {
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityRequestProto result = new org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityRequestProto(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ result.executorNum_ = executorNum_;
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+ to_bitField0_ |= 0x00000002;
+ }
+ result.queueSize_ = queueSize_;
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityRequestProto) {
+ return mergeFrom((org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityRequestProto)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityRequestProto other) {
+ if (other == org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityRequestProto.getDefaultInstance()) return this;
+ if (other.hasExecutorNum()) {
+ setExecutorNum(other.getExecutorNum());
+ }
+ if (other.hasQueueSize()) {
+ setQueueSize(other.getQueueSize());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityRequestProto parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityRequestProto) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ private int bitField0_;
+
+ // optional int32 executorNum = 1;
+ private int executorNum_ ;
+ /**
+ * optional int32 executorNum = 1;
+ */
+ public boolean hasExecutorNum() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * optional int32 executorNum = 1;
+ */
+ public int getExecutorNum() {
+ return executorNum_;
+ }
+ /**
+ * optional int32 executorNum = 1;
+ */
+ public Builder setExecutorNum(int value) {
+ bitField0_ |= 0x00000001;
+ executorNum_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * optional int32 executorNum = 1;
+ */
+ public Builder clearExecutorNum() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ executorNum_ = 0;
+ onChanged();
+ return this;
+ }
+
+ // optional int32 queueSize = 2;
+ private int queueSize_ ;
+ /**
+ * optional int32 queueSize = 2;
+ */
+ public boolean hasQueueSize() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * optional int32 queueSize = 2;
+ */
+ public int getQueueSize() {
+ return queueSize_;
+ }
+ /**
+ * optional int32 queueSize = 2;
+ */
+ public Builder setQueueSize(int value) {
+ bitField0_ |= 0x00000002;
+ queueSize_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * optional int32 queueSize = 2;
+ */
+ public Builder clearQueueSize() {
+ bitField0_ = (bitField0_ & ~0x00000002);
+ queueSize_ = 0;
+ onChanged();
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:SetCapacityRequestProto)
+ }
+
+ static {
+ defaultInstance = new SetCapacityRequestProto(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:SetCapacityRequestProto)
+ }
+
+ public interface SetCapacityResponseProtoOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+ }
+ /**
+ * Protobuf type {@code SetCapacityResponseProto}
+ */
+ public static final class SetCapacityResponseProto extends
+ com.google.protobuf.GeneratedMessage
+ implements SetCapacityResponseProtoOrBuilder {
+ // Use SetCapacityResponseProto.newBuilder() to construct.
+ private SetCapacityResponseProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private SetCapacityResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final SetCapacityResponseProto defaultInstance;
+ public static SetCapacityResponseProto getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public SetCapacityResponseProto getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private SetCapacityResponseProto(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_SetCapacityResponseProto_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_SetCapacityResponseProto_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityResponseProto.class, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityResponseProto.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser<SetCapacityResponseProto> PARSER =
+ new com.google.protobuf.AbstractParser<SetCapacityResponseProto>() {
+ public SetCapacityResponseProto parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new SetCapacityResponseProto(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser<SetCapacityResponseProto> getParserForType() {
+ return PARSER;
+ }
+
+ private void initFields() {
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityResponseProto)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityResponseProto other = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityResponseProto) obj;
+
+ boolean result = true;
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ private int memoizedHashCode = 0;
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityResponseProto parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityResponseProto parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityResponseProto parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityResponseProto parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityResponseProto parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityResponseProto parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityResponseProto parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityResponseProto parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityResponseProto parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityResponseProto parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityResponseProto prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code SetCapacityResponseProto}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityResponseProtoOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_SetCapacityResponseProto_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_SetCapacityResponseProto_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityResponseProto.class, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityResponseProto.Builder.class);
+ }
+
+ // Construct using org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityResponseProto.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_SetCapacityResponseProto_descriptor;
+ }
+
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityResponseProto getDefaultInstanceForType() {
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityResponseProto.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityResponseProto build() {
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityResponseProto result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityResponseProto buildPartial() {
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityResponseProto result = new org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityResponseProto(this);
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityResponseProto) {
+ return mergeFrom((org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityResponseProto)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityResponseProto other) {
+ if (other == org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityResponseProto.getDefaultInstance()) return this;
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityResponseProto parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityResponseProto) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:SetCapacityResponseProto)
+ }
+
+ static {
+ defaultInstance = new SetCapacityResponseProto(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:SetCapacityResponseProto)
+ }
+
+ /**
+ * Protobuf service {@code LlapDaemonProtocol}
+ */
+ public static abstract class LlapDaemonProtocol
+ implements com.google.protobuf.Service {
+ protected LlapDaemonProtocol() {}
+
+ public interface Interface {
+ /**
+ * rpc registerDag(.RegisterDagRequestProto) returns (.RegisterDagResponseProto);
+ */
+ public abstract void registerDag(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagRequestProto request,
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagResponseProto> done);
+
+ /**
+ * rpc submitWork(.SubmitWorkRequestProto) returns (.SubmitWorkResponseProto);
+ */
+ public abstract void submitWork(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto request,
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkResponseProto> done);
+
+ /**
+ * rpc sourceStateUpdated(.SourceStateUpdatedRequestProto) returns (.SourceStateUpdatedResponseProto);
+ */
+ public abstract void sourceStateUpdated(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedRequestProto request,
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedResponseProto> done);
+
+ /**
+ * rpc queryComplete(.QueryCompleteRequestProto) returns (.QueryCompleteResponseProto);
+ */
+ public abstract void queryComplete(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteRequestProto request,
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteResponseProto> done);
+
+ /**
+ * rpc terminateFragment(.TerminateFragmentRequestProto) returns (.TerminateFragmentResponseProto);
+ */
+ public abstract void terminateFragment(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto request,
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentResponseProto> done);
+
+ /**
+ * rpc updateFragment(.UpdateFragmentRequestProto) returns (.UpdateFragmentResponseProto);
+ */
+ public abstract void updateFragment(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UpdateFragmentRequestProto request,
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UpdateFragmentResponseProto> done);
+
+ }
+
+ public static com.google.protobuf.Service newReflectiveService(
+ final Interface impl) {
+ return new LlapDaemonProtocol() {
+ @java.lang.Override
+ public void registerDag(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagRequestProto request,
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagResponseProto> done) {
+ impl.registerDag(controller, request, done);
+ }
+
+ @java.lang.Override
+ public void submitWork(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto request,
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkResponseProto> done) {
+ impl.submitWork(controller, request, done);
+ }
+
+ @java.lang.Override
+ public void sourceStateUpdated(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedRequestProto request,
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedResponseProto> done) {
+ impl.sourceStateUpdated(controller, request, done);
+ }
+
+ @java.lang.Override
+ public void queryComplete(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteRequestProto request,
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteResponseProto> done) {
+ impl.queryComplete(controller, request, done);
+ }
+
+ @java.lang.Override
+ public void terminateFragment(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto request,
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentResponseProto> done) {
+ impl.terminateFragment(controller, request, done);
+ }
+
+ @java.lang.Override
+ public void updateFragment(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UpdateFragmentRequestProto request,
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UpdateFragmentResponseProto> done) {
+ impl.updateFragment(controller, request, done);
+ }
+
+ };
+ }
+
+ public static com.google.protobuf.BlockingService
+ newReflectiveBlockingService(final BlockingInterface impl) {
+ return new com.google.protobuf.BlockingService() {
+ public final com.google.protobuf.Descriptors.ServiceDescriptor
+ getDescriptorForType() {
+ return getDescriptor();
+ }
+
+ public final com.google.protobuf.Message callBlockingMethod(
+ com.google.protobuf.Descriptors.MethodDescriptor method,
+ com.google.protobuf.RpcController controller,
+ com.google.protobuf.Message request)
+ throws com.google.protobuf.ServiceException {
+ if (method.getService() != getDescriptor()) {
+ throw new java.lang.IllegalArgumentException(
+ "Service.callBlockingMethod() given method descriptor for " +
+ "wrong service type.");
+ }
+ switch(method.getIndex()) {
case 0:
return impl.registerDag(controller, (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagRequestProto)request);
case 1:
@@ -22315,6 +23177,14 @@ public abstract void getDaemonMetrics(
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsResponseProto> done);
+ /**
+ * rpc setCapacity(.SetCapacityRequestProto) returns (.SetCapacityResponseProto);
+ */
+ public abstract void setCapacity(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityRequestProto request,
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityResponseProto> done);
+
}
public static com.google.protobuf.Service newReflectiveService(
@@ -22344,6 +23214,14 @@ public void getDaemonMetrics(
impl.getDaemonMetrics(controller, request, done);
}
+ @java.lang.Override
+ public void setCapacity(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityRequestProto request,
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityResponseProto> done) {
+ impl.setCapacity(controller, request, done);
+ }
+
};
}
@@ -22372,6 +23250,8 @@ public void getDaemonMetrics(
return impl.purgeCache(controller, (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheRequestProto)request);
case 2:
return impl.getDaemonMetrics(controller, (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsRequestProto)request);
+ case 3:
+ return impl.setCapacity(controller, (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityRequestProto)request);
default:
throw new java.lang.AssertionError("Can't get here.");
}
@@ -22392,6 +23272,8 @@ public void getDaemonMetrics(
return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheRequestProto.getDefaultInstance();
case 2:
return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsRequestProto.getDefaultInstance();
+ case 3:
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityRequestProto.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
}
@@ -22412,6 +23294,8 @@ public void getDaemonMetrics(
return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheResponseProto.getDefaultInstance();
case 2:
return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsResponseProto.getDefaultInstance();
+ case 3:
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityResponseProto.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
}
@@ -22444,6 +23328,14 @@ public abstract void getDaemonMetrics(
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsResponseProto> done);
+ /**
+ * rpc setCapacity(.SetCapacityRequestProto) returns (.SetCapacityResponseProto);
+ */
+ public abstract void setCapacity(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityRequestProto request,
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityResponseProto> done);
+
public static final
com.google.protobuf.Descriptors.ServiceDescriptor
getDescriptor() {
@@ -22481,6 +23373,11 @@ public final void callMethod(
com.google.protobuf.RpcUtil.<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsResponseProto>specializeCallback(
done));
return;
+ case 3:
+ this.setCapacity(controller, (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityRequestProto)request,
+ com.google.protobuf.RpcUtil.<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityResponseProto>specializeCallback(
+ done));
+ return;
default:
throw new java.lang.AssertionError("Can't get here.");
}
@@ -22501,6 +23398,8 @@ public final void callMethod(
return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheRequestProto.getDefaultInstance();
case 2:
return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsRequestProto.getDefaultInstance();
+ case 3:
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityRequestProto.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
}
@@ -22521,6 +23420,8 @@ public final void callMethod(
return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheResponseProto.getDefaultInstance();
case 2:
return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsResponseProto.getDefaultInstance();
+ case 3:
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityResponseProto.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
}
@@ -22586,6 +23487,21 @@ public void getDaemonMetrics(
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsResponseProto.class,
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsResponseProto.getDefaultInstance()));
}
+
+ public void setCapacity(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityRequestProto request,
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityResponseProto> done) {
+ channel.callMethod(
+ getDescriptor().getMethods().get(3),
+ controller,
+ request,
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityResponseProto.getDefaultInstance(),
+ com.google.protobuf.RpcUtil.generalizeCallback(
+ done,
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityResponseProto.class,
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityResponseProto.getDefaultInstance()));
+ }
}
public static BlockingInterface newBlockingStub(
@@ -22608,6 +23524,11 @@ public static BlockingInterface newBlockingStub(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsRequestProto request)
throws com.google.protobuf.ServiceException;
+
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityResponseProto setCapacity(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityRequestProto request)
+ throws com.google.protobuf.ServiceException;
}
private static final class BlockingStub implements BlockingInterface {
@@ -22652,6 +23573,18 @@ private BlockingStub(com.google.protobuf.BlockingRpcChannel channel) {
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsResponseProto.getDefaultInstance());
}
+
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityResponseProto setCapacity(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityRequestProto request)
+ throws com.google.protobuf.ServiceException {
+ return (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityResponseProto) channel.callBlockingMethod(
+ getDescriptor().getMethods().get(3),
+ controller,
+ request,
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityResponseProto.getDefaultInstance());
+ }
+
}
// @@protoc_insertion_point(class_scope:LlapManagementProtocol)
@@ -22802,6 +23735,16 @@ private BlockingStub(com.google.protobuf.BlockingRpcChannel channel) {
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_GetDaemonMetricsResponseProto_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_SetCapacityRequestProto_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_SetCapacityRequestProto_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_SetCapacityResponseProto_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_SetCapacityResponseProto_fieldAccessorTable;
public static com.google.protobuf.Descriptors.FileDescriptor
getDescriptor() {
@@ -22887,30 +23830,34 @@ private BlockingStub(com.google.protobuf.BlockingRpcChannel channel) {
"_memory_bytes\030\001 \001(\003\"&\n\010MapEntry\022\013\n\003key\030\001" +
" \001(\t\022\r\n\005value\030\002 \001(\003\"\036\n\034GetDaemonMetricsR" +
"equestProto\";\n\035GetDaemonMetricsResponseP" +
- "roto\022\032\n\007metrics\030\001 \003(\0132\t.MapEntry*2\n\020Sour" +
- "ceStateProto\022\017\n\013S_SUCCEEDED\020\001\022\r\n\tS_RUNNI" +
- "NG\020\002*E\n\024SubmissionStateProto\022\014\n\010ACCEPTED" +
- "\020\001\022\014\n\010REJECTED\020\002\022\021\n\rEVICTED_OTHER\020\0032\337\003\n\022",
- "LlapDaemonProtocol\022B\n\013registerDag\022\030.Regi" +
- "sterDagRequestProto\032\031.RegisterDagRespons" +
- "eProto\022?\n\nsubmitWork\022\027.SubmitWorkRequest" +
- "Proto\032\030.SubmitWorkResponseProto\022W\n\022sourc" +
- "eStateUpdated\022\037.SourceStateUpdatedReques" +
- "tProto\032 .SourceStateUpdatedResponseProto" +
- "\022H\n\rqueryComplete\022\032.QueryCompleteRequest" +
- "Proto\032\033.QueryCompleteResponseProto\022T\n\021te" +
- "rminateFragment\022\036.TerminateFragmentReque" +
- "stProto\032\037.TerminateFragmentResponseProto",
- "\022K\n\016updateFragment\022\033.UpdateFragmentReque" +
- "stProto\032\034.UpdateFragmentResponseProto2\361\001" +
- "\n\026LlapManagementProtocol\022C\n\022getDelegatio" +
- "nToken\022\025.GetTokenRequestProto\032\026.GetToken" +
- "ResponseProto\022?\n\npurgeCache\022\027.PurgeCache" +
- "RequestProto\032\030.PurgeCacheResponseProto\022Q" +
- "\n\020getDaemonMetrics\022\035.GetDaemonMetricsReq" +
- "uestProto\032\036.GetDaemonMetricsResponseProt" +
- "oBH\n&org.apache.hadoop.hive.llap.daemon." +
- "rpcB\030LlapDaemonProtocolProtos\210\001\001\240\001\001"
+ "roto\022\032\n\007metrics\030\001 \003(\0132\t.MapEntry\"A\n\027SetC" +
+ "apacityRequestProto\022\023\n\013executorNum\030\001 \001(\005" +
+ "\022\021\n\tqueueSize\030\002 \001(\005\"\032\n\030SetCapacityRespon" +
+ "seProto*2\n\020SourceStateProto\022\017\n\013S_SUCCEED",
+ "ED\020\001\022\r\n\tS_RUNNING\020\002*E\n\024SubmissionStatePr" +
+ "oto\022\014\n\010ACCEPTED\020\001\022\014\n\010REJECTED\020\002\022\021\n\rEVICT" +
+ "ED_OTHER\020\0032\337\003\n\022LlapDaemonProtocol\022B\n\013reg" +
+ "isterDag\022\030.RegisterDagRequestProto\032\031.Reg" +
+ "isterDagResponseProto\022?\n\nsubmitWork\022\027.Su" +
+ "bmitWorkRequestProto\032\030.SubmitWorkRespons" +
+ "eProto\022W\n\022sourceStateUpdated\022\037.SourceSta" +
+ "teUpdatedRequestProto\032 .SourceStateUpdat" +
+ "edResponseProto\022H\n\rqueryComplete\022\032.Query" +
+ "CompleteRequestProto\032\033.QueryCompleteResp",
+ "onseProto\022T\n\021terminateFragment\022\036.Termina" +
+ "teFragmentRequestProto\032\037.TerminateFragme" +
+ "ntResponseProto\022K\n\016updateFragment\022\033.Upda" +
+ "teFragmentRequestProto\032\034.UpdateFragmentR" +
+ "esponseProto2\265\002\n\026LlapManagementProtocol\022" +
+ "C\n\022getDelegationToken\022\025.GetTokenRequestP" +
+ "roto\032\026.GetTokenResponseProto\022?\n\npurgeCac" +
+ "he\022\027.PurgeCacheRequestProto\032\030.PurgeCache" +
+ "ResponseProto\022Q\n\020getDaemonMetrics\022\035.GetD" +
+ "aemonMetricsRequestProto\032\036.GetDaemonMetr",
+ "icsResponseProto\022B\n\013setCapacity\022\030.SetCap" +
+ "acityRequestProto\032\031.SetCapacityResponseP" +
+ "rotoBH\n&org.apache.hadoop.hive.llap.daem" +
+ "on.rpcB\030LlapDaemonProtocolProtos\210\001\001\240\001\001"
};
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -23091,6 +24038,18 @@ private BlockingStub(com.google.protobuf.BlockingRpcChannel channel) {
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_GetDaemonMetricsResponseProto_descriptor,
new java.lang.String[] { "Metrics", });
+ internal_static_SetCapacityRequestProto_descriptor =
+ getDescriptor().getMessageTypes().get(29);
+ internal_static_SetCapacityRequestProto_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_SetCapacityRequestProto_descriptor,
+ new java.lang.String[] { "ExecutorNum", "QueueSize", });
+ internal_static_SetCapacityResponseProto_descriptor =
+ getDescriptor().getMessageTypes().get(30);
+ internal_static_SetCapacityResponseProto_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_SetCapacityResponseProto_descriptor,
+ new java.lang.String[] { });
return null;
}
};
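Note on the generated API above: SetCapacityRequestProto follows the standard protobuf 2.5 message pattern (builder, PARSER, has*/get* accessors), so it round-trips like any other message in this file. A minimal sketch using only methods visible in this diff:

```java
import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityRequestProto;

class SetCapacityRoundTrip {
  public static void main(String[] args) throws Exception {
    // Build a request with both optional fields, serialize, and parse back.
    SetCapacityRequestProto request = SetCapacityRequestProto.newBuilder()
        .setExecutorNum(8)   // field 1: optional int32 executorNum
        .setQueueSize(16)    // field 2: optional int32 queueSize
        .build();

    byte[] wire = request.toByteArray();
    SetCapacityRequestProto parsed = SetCapacityRequestProto.parseFrom(wire);

    assert parsed.hasExecutorNum() && parsed.getExecutorNum() == 8;
    assert parsed.hasQueueSize() && parsed.getQueueSize() == 16;
  }
}
```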
diff --git llap-common/src/java/org/apache/hadoop/hive/llap/impl/LlapManagementProtocolClientImpl.java llap-common/src/java/org/apache/hadoop/hive/llap/impl/LlapManagementProtocolClientImpl.java
index 05722cd92f..d4b8cce040 100644
--- llap-common/src/java/org/apache/hadoop/hive/llap/impl/LlapManagementProtocolClientImpl.java
+++ llap-common/src/java/org/apache/hadoop/hive/llap/impl/LlapManagementProtocolClientImpl.java
@@ -99,4 +99,14 @@ public GetTokenResponseProto getDelegationToken(RpcController controller,
throw new ServiceException(e);
}
}
+
+ @Override
+ public LlapDaemonProtocolProtos.SetCapacityResponseProto setCapacity(final RpcController controller,
+ final LlapDaemonProtocolProtos.SetCapacityRequestProto request) throws ServiceException {
+ try {
+ return getProxy().setCapacity(null, request);
+ } catch (IOException e) {
+ throw new ServiceException(e);
+ }
+ }
}
diff --git llap-common/src/protobuf/LlapDaemonProtocol.proto llap-common/src/protobuf/LlapDaemonProtocol.proto
index 91dc368b6c..ffc3c81758 100644
--- llap-common/src/protobuf/LlapDaemonProtocol.proto
+++ llap-common/src/protobuf/LlapDaemonProtocol.proto
@@ -223,6 +223,14 @@ message GetDaemonMetricsResponseProto {
repeated MapEntry metrics = 1;
}
+message SetCapacityRequestProto {
+ optional int32 executorNum = 1;
+ optional int32 queueSize = 2;
+}
+
+message SetCapacityResponseProto {
+}
+
service LlapDaemonProtocol {
rpc registerDag(RegisterDagRequestProto) returns (RegisterDagResponseProto);
rpc submitWork(SubmitWorkRequestProto) returns (SubmitWorkResponseProto);
@@ -236,4 +244,5 @@ service LlapManagementProtocol {
rpc getDelegationToken(GetTokenRequestProto) returns (GetTokenResponseProto);
rpc purgeCache(PurgeCacheRequestProto) returns (PurgeCacheResponseProto);
rpc getDaemonMetrics(GetDaemonMetricsRequestProto) returns (GetDaemonMetricsResponseProto);
+ rpc setCapacity(SetCapacityRequestProto) returns (SetCapacityResponseProto);
}
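For context, a minimal end-to-end sketch of driving the new management RPC, using the client class and builder calls added in this patch. The daemon host and port are placeholders, and the two trailing null arguments mirror the test setup further below rather than a production configuration:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityRequestProto;
    import org.apache.hadoop.hive.llap.impl.LlapManagementProtocolClientImpl;

    public class SetCapacitySketch {
      public static void main(String[] args) throws Exception {
        // Placeholder address: point this at a real daemon's management endpoint.
        LlapManagementProtocolClientImpl client = new LlapManagementProtocolClientImpl(
            new Configuration(), "llap-daemon-host", 15004, null, null);
        // Shrink to 1 executor and a wait queue of 1. Values above the originally
        // configured capacity make the daemon throw IllegalArgumentException.
        client.setCapacity(null,
            SetCapacityRequestProto.newBuilder().setExecutorNum(1).setQueueSize(1).build());
      }
    }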
diff --git llap-server/src/java/org/apache/hadoop/hive/llap/daemon/ContainerRunner.java llap-server/src/java/org/apache/hadoop/hive/llap/daemon/ContainerRunner.java
index 582f518315..46827bcde1 100644
--- llap-server/src/java/org/apache/hadoop/hive/llap/daemon/ContainerRunner.java
+++ llap-server/src/java/org/apache/hadoop/hive/llap/daemon/ContainerRunner.java
@@ -27,6 +27,8 @@
import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentResponseProto;
import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UpdateFragmentRequestProto;
import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UpdateFragmentResponseProto;
+import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityRequestProto;
+import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityResponseProto;
public interface ContainerRunner {
@@ -47,4 +49,7 @@ TerminateFragmentResponseProto terminateFragment(
UpdateFragmentResponseProto updateFragment(
UpdateFragmentRequestProto request) throws IOException;
+
+ SetCapacityResponseProto setCapacity(
+ SetCapacityRequestProto request);
}
diff --git llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/ContainerRunnerImpl.java llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/ContainerRunnerImpl.java
index 44e2fa92ab..27e1910ec7 100644
--- llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/ContainerRunnerImpl.java
+++ llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/ContainerRunnerImpl.java
@@ -40,6 +40,7 @@
import org.apache.hadoop.hive.llap.daemon.QueryFailedHandler;
import org.apache.hadoop.hive.llap.daemon.SchedulerFragmentCompletingListener;
import org.apache.hadoop.hive.llap.daemon.impl.LlapTokenChecker.LlapTokenInfo;
+import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos;
import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentRuntimeInfo;
import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto;
import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto;
@@ -59,6 +60,8 @@
import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentResponseProto;
import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UpdateFragmentRequestProto;
import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UpdateFragmentResponseProto;
+import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityRequestProto;
+import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityResponseProto;
import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexOrBinary;
import org.apache.hadoop.hive.llap.metrics.LlapDaemonExecutorMetrics;
import org.apache.hadoop.hive.llap.security.LlapSignerImpl;
@@ -462,6 +465,13 @@ public UpdateFragmentResponseProto updateFragment(
.setResult(result).setIsGuaranteed(isGuaranteed).build();
}
+ @Override
+ public SetCapacityResponseProto setCapacity(
+ SetCapacityRequestProto request) {
+ executorService.setCapacity(request.getExecutorNum(), request.getQueueSize());
+ return SetCapacityResponseProto.newBuilder().build();
+ }
+
private String stringifySourceStateUpdateRequest(SourceStateUpdatedRequestProto request) {
StringBuilder sb = new StringBuilder();
QueryIdentifier queryIdentifier = new QueryIdentifier(request.getQueryIdentifier().getApplicationIdString(),
diff --git llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/EvictingPriorityBlockingQueue.java llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/EvictingPriorityBlockingQueue.java
index 3fe198d633..7c634b67ce 100644
--- llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/EvictingPriorityBlockingQueue.java
+++ llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/EvictingPriorityBlockingQueue.java
@@ -19,6 +19,7 @@
import java.util.Comparator;
+import com.google.common.annotations.VisibleForTesting;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -38,7 +39,8 @@
private final PriorityBlockingDeque deque;
private final Comparator comparator;
- private final int waitQueueSize;
+ @VisibleForTesting
+ int waitQueueSize;
private int currentSize = 0;
@@ -121,6 +123,10 @@ public synchronized int size() {
return currentSize;
}
+ public synchronized void setWaitQueueSize(int waitQueueSize) {
+ this.waitQueueSize = waitQueueSize;
+ }
+
@Override
public synchronized String toString() {
StringBuilder sb = new StringBuilder();
diff --git llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemon.java llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemon.java
index 20d8c61cf3..cbc5336e5d 100644
--- llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemon.java
+++ llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemon.java
@@ -57,6 +57,8 @@
import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentResponseProto;
import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UpdateFragmentRequestProto;
import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UpdateFragmentResponseProto;
+import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityRequestProto;
+import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityResponseProto;
import org.apache.hadoop.hive.llap.daemon.services.impl.LlapWebServices;
import org.apache.hadoop.hive.llap.io.api.LlapProxy;
import org.apache.hadoop.hive.llap.metrics.LlapDaemonExecutorMetrics;
@@ -637,6 +639,12 @@ public UpdateFragmentResponseProto updateFragment(
return containerRunner.updateFragment(request);
}
+ @Override
+ public SetCapacityResponseProto setCapacity(
+ SetCapacityRequestProto request) {
+ return containerRunner.setCapacity(request);
+ }
+
@VisibleForTesting
public long getNumSubmissions() {
return numSubmissions.get();
diff --git llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapProtocolServerImpl.java llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapProtocolServerImpl.java
index e508c5f128..bb03727e1d 100644
--- llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapProtocolServerImpl.java
+++ llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapProtocolServerImpl.java
@@ -328,6 +328,15 @@ public GetTokenResponseProto getDelegationToken(RpcController controller,
return responseProtoBuilder.build();
}
+ @Override
+ public LlapDaemonProtocolProtos.SetCapacityResponseProto setCapacity(final RpcController controller,
+ final LlapDaemonProtocolProtos.SetCapacityRequestProto request) throws ServiceException {
+ LlapDaemonProtocolProtos.SetCapacityResponseProto.Builder responseProtoBuilder =
+ LlapDaemonProtocolProtos.SetCapacityResponseProto.newBuilder();
+ containerRunner.setCapacity(request);
+ return responseProtoBuilder.build();
+ }
+
private boolean determineIfSigningIsRequired(UserGroupInformation callingUser) {
switch (isSigningRequiredConfig) {
case FALSE: return false;
diff --git llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/Scheduler.java llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/Scheduler.java
index 2205847568..a0357e62d0 100644
--- llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/Scheduler.java
+++ llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/Scheduler.java
@@ -50,4 +50,11 @@
QueryIdentifier findQueryByFragment(String fragmentId);
boolean updateFragment(String fragmentId, boolean isGuaranteed);
+
+ /**
+ * Sets the scheduler's executor count and wait queue size.
+ * @param newExecutors New number of executors
+ * @param newWaitQueueSize New size of the wait queue
+ */
+ void setCapacity(int newExecutors, int newWaitQueueSize);
}
diff --git llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskExecutorService.java llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskExecutorService.java
index 10d78bd1a3..270173abc1 100644
--- llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskExecutorService.java
+++ llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskExecutorService.java
@@ -110,8 +110,12 @@
final BlockingQueue preemptionQueue;
private final boolean enablePreemption;
private final ThreadPoolExecutor threadPoolExecutor;
- private final AtomicInteger numSlotsAvailable;
- private final int maxParallelExecutors;
+ @VisibleForTesting
+ AtomicInteger numSlotsAvailable;
+ @VisibleForTesting
+ int maxParallelExecutors;
+ private final int configuredMaxExecutors;
+ private final int configuredWaitingQueueSize;
private final Clock clock;
// Tracks running fragments, and completing fragments.
@@ -145,7 +149,8 @@ public TaskExecutorService(int numExecutors, int waitQueueSize,
final LlapQueueComparatorBase waitQueueComparator = createComparator(
waitQueueComparatorClassName);
- this.maxParallelExecutors = numExecutors;
+ this.maxParallelExecutors = this.configuredMaxExecutors = numExecutors;
+ this.configuredWaitingQueueSize = waitQueueSize;
this.waitQueue = new EvictingPriorityBlockingQueue<>(waitQueueComparator, waitQueueSize);
this.clock = clock == null ? new MonotonicClock() : clock;
this.threadPoolExecutor = new ThreadPoolExecutor(numExecutors, // core pool size
@@ -176,6 +181,31 @@ public TaskExecutorService(int numExecutors, int waitQueueSize,
Futures.addCallback(future, new WaitQueueWorkerCallback());
}
+ /**
+ * Sets the TaskExecutorService capacity to the new values. Both the number of executors and
+ * the wait queue size must be less than or equal to the originally configured values, so that
+ * we do not exceed the resources the daemon was originally sized for. Shrinking the capacity
+ * does not by itself cancel or reject tasks that are already executing or queued.
+ * @param newNumExecutors The new number of executors
+ * @param newWaitQueueSize The new size of the wait queue
+ */
+ @Override
+ public synchronized void setCapacity(int newNumExecutors, int newWaitQueueSize) {
+ if (newNumExecutors > configuredMaxExecutors) {
+ throw new IllegalArgumentException("Requested newNumExecutors=" + newNumExecutors
+ + " is greater than the configured maximum=" + configuredMaxExecutors);
+ }
+ if (newWaitQueueSize > configuredWaitingQueueSize) {
+ throw new IllegalArgumentException("Requested newWaitQueueSize=" + newWaitQueueSize
+ + " is greater than the configured maximum=" + configuredWaitingQueueSize);
+ }
+ numSlotsAvailable.addAndGet(newNumExecutors - maxParallelExecutors);
+ maxParallelExecutors = newNumExecutors;
+ waitQueue.setWaitQueueSize(newWaitQueueSize);
+ LOG.info("TaskExecutorService is setting capacity to: numExecutors=" + newNumExecutors
+ + ", waitQueueSize=" + newWaitQueueSize);
+ }
+
private LlapQueueComparatorBase createComparator(
String waitQueueComparatorClassName) {
final LlapQueueComparatorBase waitQueueComparator;
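The slot accounting above deserves a worked example: numSlotsAvailable is adjusted by the delta between the new and old executor counts, so shrinking while all executors are busy drives it negative, and nothing new is scheduled until enough running tasks finish to bring it back above zero. A standalone sketch of just that arithmetic (the variable names mirror the patch; the class itself is hypothetical):

    import java.util.concurrent.atomic.AtomicInteger;

    // Worked example of the delta-based slot accounting from setCapacity
    // (not the real service, just the arithmetic).
    public class SlotAccountingSketch {
      public static void main(String[] args) {
        int maxParallelExecutors = 2;                            // current capacity
        AtomicInteger numSlotsAvailable = new AtomicInteger(0);  // both executors busy

        int newNumExecutors = 1;                                 // setCapacity(1, ...)
        numSlotsAvailable.addAndGet(newNumExecutors - maxParallelExecutors);
        maxParallelExecutors = newNumExecutors;
        // numSlotsAvailable is now -1: the scheduler picks up nothing until a
        // running task completes and releases a slot (-1 -> 0 -> eventually 1).
        System.out.println("slots=" + numSlotsAvailable.get()); // prints slots=-1
      }
    }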
diff --git llap-server/src/test/org/apache/hadoop/hive/llap/daemon/impl/TestEvictingPriorityBlockingQueue.java llap-server/src/test/org/apache/hadoop/hive/llap/daemon/impl/TestEvictingPriorityBlockingQueue.java
index 1b550538d2..99e7b21e9a 100644
--- llap-server/src/test/org/apache/hadoop/hive/llap/daemon/impl/TestEvictingPriorityBlockingQueue.java
+++ llap-server/src/test/org/apache/hadoop/hive/llap/daemon/impl/TestEvictingPriorityBlockingQueue.java
@@ -90,6 +90,66 @@ public void test() throws InterruptedException {
assertEquals(7, queue.size());
}
+ /**
+ * Tests that the wait queue size can be changed dynamically.
+ * @throws InterruptedException
+ */
+ @Test (timeout = 10000)
+ public void testSetWaitQueueSize() throws InterruptedException {
+ Element e;
+ EvictingPriorityBlockingQueue queue = new EvictingPriorityBlockingQueue<>(new ElementComparator(), 5);
+
+ Element[] elements = new Element[10];
+ for (int i = 0; i < elements.length; i++) {
+ elements[i] = new Element(i);
+ }
+
+ assertNull(queue.offer(elements[0], 0));
+ assertNull(queue.offer(elements[1], 0));
+ assertNull(queue.offer(elements[2], 0));
+ assertNull(queue.offer(elements[3], 0));
+ //0,1,2,3
+
+ queue.setWaitQueueSize(3);
+
+ // Not enough space left in the queue
+ e = queue.offer(elements[4], 0);
+ assertEquals(e, elements[0]);
+ assertEquals(4, queue.size());
+ //1,2,3,4
+
+ e = queue.offer(elements[5], 1);
+ assertEquals(e, elements[1]);
+ assertEquals(4, queue.size());
+ //2,3,4,5
+
+ // With 2 additional elements allowed, there is enough space left
+ assertNull(queue.offer(elements[6], 2));
+ assertEquals(5, queue.size());
+ //2,3,4,5,6
+
+ e = queue.take();
+ assertEquals(elements[6], e); //Highest priority at this point should have come out.
+ //2,3,4,5
+
+ e = queue.take();
+ assertEquals(elements[5], e); //Highest priority at this point should have come out.
+ //2,3,4
+
+ e = queue.take();
+ assertEquals(elements[4], e); //Highest priority at this point should have come out.
+ assertEquals(2, queue.size());
+ //2,3
+
+ assertNull(queue.offer(elements[7], 0));
+ assertEquals(3, queue.size());
+ //2,3,7
+
+ e = queue.offer(elements[8], 0);
+ assertEquals(e, elements[2]);
+ //3,7,8
+ }
+
private static class Element {
public Element(int x) {
this.x = x;
diff --git llap-server/src/test/org/apache/hadoop/hive/llap/daemon/impl/TestLlapDaemonProtocolServerImpl.java llap-server/src/test/org/apache/hadoop/hive/llap/daemon/impl/TestLlapDaemonProtocolServerImpl.java
index 91f8cd32bb..3cd39c9d58 100644
--- llap-server/src/test/org/apache/hadoop/hive/llap/daemon/impl/TestLlapDaemonProtocolServerImpl.java
+++ llap-server/src/test/org/apache/hadoop/hive/llap/daemon/impl/TestLlapDaemonProtocolServerImpl.java
@@ -39,6 +39,8 @@
import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmissionStateProto;
import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsRequestProto;
import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsResponseProto;
+import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityRequestProto;
+import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityResponseProto;
import org.apache.hadoop.hive.llap.impl.LlapProtocolClientImpl;
import org.junit.Test;
@@ -145,4 +147,34 @@ public void testGetDaemonMetrics() throws ServiceException, IOException {
}
}
+ @Test(timeout = 10000)
+ public void testSetCapacity() throws ServiceException, IOException {
+ LlapDaemonConfiguration daemonConf = new LlapDaemonConfiguration();
+ int numHandlers = HiveConf.getIntVar(daemonConf, ConfVars.LLAP_DAEMON_RPC_NUM_HANDLERS);
+ ContainerRunner containerRunnerMock = mock(ContainerRunner.class);
+ when(containerRunnerMock.setCapacity(any(SetCapacityRequestProto.class))).thenReturn(
+ SetCapacityResponseProto
+ .newBuilder()
+ .build());
+ LlapDaemonExecutorMetrics executorMetrics =
+ LlapDaemonExecutorMetrics.create("LLAP", "SessionId", numHandlers, new int[] {30, 60, 300}, 0, 0L, 0);
+ LlapProtocolServerImpl server =
+ new LlapProtocolServerImpl(null, numHandlers, containerRunnerMock,
+ new AtomicReference(), new AtomicReference(),
+ 0, 0, null, executorMetrics);
+
+ try {
+ server.init(new Configuration());
+ server.start();
+ InetSocketAddress serverAddr = server.getManagementBindAddress();
+
+ LlapManagementProtocolPB client =
+ new LlapManagementProtocolClientImpl(new Configuration(), serverAddr.getHostName(),
+ serverAddr.getPort(), null, null);
+ client.setCapacity(null,
+ SetCapacityRequestProto.newBuilder().setExecutorNum(1).setQueueSize(1).build());
+ } finally {
+ server.stop();
+ }
+ }
}
diff --git llap-server/src/test/org/apache/hadoop/hive/llap/daemon/impl/TestTaskExecutorService.java llap-server/src/test/org/apache/hadoop/hive/llap/daemon/impl/TestTaskExecutorService.java
index 8ea3ca16bc..0df8d079ed 100644
--- llap-server/src/test/org/apache/hadoop/hive/llap/daemon/impl/TestTaskExecutorService.java
+++ llap-server/src/test/org/apache/hadoop/hive/llap/daemon/impl/TestTaskExecutorService.java
@@ -385,6 +385,152 @@ public void testDoKillMultiple() throws InterruptedException {
assertTrue(victim2.wasPreempted());
}
+ /**
+ * Tests that we can decrease and increase the TaskExecutorService capacity on an active service.
+ * Already submitted tasks are not cancelled or rejected by the capacity change itself; they are
+ * evicted only if a new task is submitted with a higher priority.
+ * @throws InterruptedException
+ */
+ @Test(timeout = 10000)
+ public void testSetCapacity() throws InterruptedException {
+ TaskExecutorServiceForTest taskExecutorService =
+ new TaskExecutorServiceForTest(2, 3, ShortestJobFirstComparator.class.getName(), true);
+
+ // r4 through r9 are lower priority as a result of canFinish being set to false.
+ MockRequest r1 = createMockRequest(1, 1, 100, 200, true, 20000l, true);
+ MockRequest r2 = createMockRequest(1, 1, 100, 200, true, 20000l, true);
+ MockRequest r3 = createMockRequest(1, 1, 100, 200, true, 20000l, true);
+ MockRequest r4 = createMockRequest(1, 1, 100, 200, true, 20000l, false);
+ MockRequest r5 = createMockRequest(1, 1, 100, 200, true, 20000l, false);
+ MockRequest r6 = createMockRequest(1, 1, 100, 200, true, 20000l, false);
+ MockRequest r7 = createMockRequest(1, 1, 100, 200, true, 20000l, false);
+ MockRequest r8 = createMockRequest(1, 1, 100, 200, true, 20000l, false);
+ MockRequest r9 = createMockRequest(1, 1, 100, 200, true, 20000l, false);
+
+ taskExecutorService.init(new Configuration());
+ taskExecutorService.start();
+ try {
+ Scheduler.SubmissionState submissionState;
+ TaskExecutorServiceForTest.InternalCompletionListenerForTest icl;
+
+ // Schedule the first 4 tasks (2 to execute, 2 to the queue)
+ submissionState = taskExecutorService.schedule(r1);
+ assertEquals(Scheduler.SubmissionState.ACCEPTED, submissionState);
+
+ submissionState = taskExecutorService.schedule(r2);
+ assertEquals(Scheduler.SubmissionState.ACCEPTED, submissionState);
+
+ submissionState = taskExecutorService.schedule(r3);
+ assertEquals(Scheduler.SubmissionState.ACCEPTED, submissionState);
+
+ submissionState = taskExecutorService.schedule(r4);
+ assertEquals(Scheduler.SubmissionState.ACCEPTED, submissionState);
+ // TaskExecutorService: Executing: r1, r2 + Queued: r3, r4
+
+ awaitStartAndSchedulerRun(r1, taskExecutorService);
+ awaitStartAndSchedulerRun(r2, taskExecutorService);
+
+ // Check if the queue and the executing tasks are as expected
+ assertEquals(2, taskExecutorService.waitQueue.size());
+ assertEquals(0, taskExecutorService.numSlotsAvailable.get());
+
+ // Change the capacity
+ taskExecutorService.setCapacity(1, 1);
+
+ // Check that the new limits are applied and the queued tasks are kept; the slot count goes negative
+ assertEquals(1, taskExecutorService.waitQueue.waitQueueSize);
+ assertEquals(1, taskExecutorService.maxParallelExecutors);
+ assertEquals(2, taskExecutorService.waitQueue.size());
+ assertEquals(-1, taskExecutorService.numSlotsAvailable.get());
+
+ // Try to schedule one more task, it should be rejected now
+ submissionState = taskExecutorService.schedule(r5);
+ assertEquals(Scheduler.SubmissionState.REJECTED, submissionState);
+ // TaskExecutorService: Executing: r1, r2 + Queued: r3, r4
+
+ // Complete r1
+ r1.awaitStart();
+ r1.complete();
+ r1.awaitEnd();
+ icl = taskExecutorService.getInternalCompletionListenerForTest(r1.getRequestId());
+ icl.awaitCompletion();
+ // TaskExecutorService: Executing: r2 + Queued: r3, r4
+
+ // Check if it is really finished
+ assertEquals(2, taskExecutorService.waitQueue.size());
+ assertEquals(0, taskExecutorService.numSlotsAvailable.get());
+
+ // Complete r2
+ r2.awaitStart();
+ r2.complete();
+ r2.awaitEnd();
+ icl = taskExecutorService.getInternalCompletionListenerForTest(r2.getRequestId());
+ icl.awaitCompletion();
+ // TaskExecutorService: Executing: r3 + Queued: r4
+
+ // Wait for a scheduling attempt; after that the wait queue should be reduced
+ awaitStartAndSchedulerRun(r3, taskExecutorService);
+ assertEquals(1, taskExecutorService.waitQueue.size());
+ assertEquals(0, taskExecutorService.numSlotsAvailable.get());
+
+ // Try to schedule one more task, it still should be rejected
+ submissionState = taskExecutorService.schedule(r6);
+ assertEquals(Scheduler.SubmissionState.REJECTED, submissionState);
+ // TaskExecutorService: Executing: r3 + Queued: r4
+
+ // Complete r3
+ r3.complete();
+ r3.awaitEnd();
+ icl = taskExecutorService.getInternalCompletionListenerForTest(r3.getRequestId());
+ icl.awaitCompletion();
+ // TaskExecutorService: Executing: r4 + Queued: -
+
+ // Try to schedule one more task; this time it should finally be accepted
+ submissionState = taskExecutorService.schedule(r7);
+ // TaskExecutorService: Executing: r4 + Queued: r7
+ assertEquals(Scheduler.SubmissionState.ACCEPTED, submissionState);
+
+ // Change back the capacity
+ taskExecutorService.setCapacity(2, 3);
+ assertEquals(3, taskExecutorService.waitQueue.waitQueueSize);
+ assertEquals(2, taskExecutorService.maxParallelExecutors);
+ // TaskExecutorService Executing: r4, r7 + Queued: -
+
+ // Wait for a scheduling attempt, the new task should be started
+ awaitStartAndSchedulerRun(r7, taskExecutorService);
+ assertEquals(0, taskExecutorService.waitQueue.size());
+ assertEquals(0, taskExecutorService.numSlotsAvailable.get());
+
+ submissionState = taskExecutorService.schedule(r8);
+ assertEquals(Scheduler.SubmissionState.ACCEPTED, submissionState);
+ // TaskExecutorService: Executing: r4, r7 + Queued: r8
+
+ submissionState = taskExecutorService.schedule(r9);
+ assertEquals(Scheduler.SubmissionState.ACCEPTED, submissionState);
+ // TaskExecutorService: Executing: r4, r7 + Queued: r8, r9
+
+ assertEquals(2, taskExecutorService.waitQueue.size());
+ assertEquals(0, taskExecutorService.numSlotsAvailable.get());
+
+ } finally {
+ taskExecutorService.shutDown(false);
+ }
+ }
+
+ @Test(timeout = 1000, expected = IllegalArgumentException.class)
+ public void testSetCapacityHighExecutors() {
+ TaskExecutorServiceForTest taskExecutorService =
+ new TaskExecutorServiceForTest(2, 3, ShortestJobFirstComparator.class.getName(), true);
+ taskExecutorService.setCapacity(3, 3);
+ }
+
+ @Test(timeout = 1000, expected = IllegalArgumentException.class)
+ public void testSetCapacityHighQueueSize() {
+ TaskExecutorServiceForTest taskExecutorService =
+ new TaskExecutorServiceForTest(2, 3, ShortestJobFirstComparator.class.getName(), true);
+ taskExecutorService.setCapacity(2, 5);
+ }
+
private void runPreemptionGraceTest(
MockRequest victim1, MockRequest victim2, int time) throws InterruptedException {
MockRequest preemptor = createMockRequest(3, 1, 100, 100, true, 20000l, false);