diff --git a/llap-common/src/gen/protobuf/gen-java/org/apache/hadoop/hive/llap/daemon/rpc/LlapDaemonProtocolProtos.java b/llap-common/src/gen/protobuf/gen-java/org/apache/hadoop/hive/llap/daemon/rpc/LlapDaemonProtocolProtos.java
index 76591404fd..b95b7daa9f 100644
--- a/llap-common/src/gen/protobuf/gen-java/org/apache/hadoop/hive/llap/daemon/rpc/LlapDaemonProtocolProtos.java
+++ b/llap-common/src/gen/protobuf/gen-java/org/apache/hadoop/hive/llap/daemon/rpc/LlapDaemonProtocolProtos.java
@@ -20050,6 +20050,1652 @@ public Builder clearPurgedMemoryBytes() {
// @@protoc_insertion_point(class_scope:PurgeCacheResponseProto)
}
+ public interface MapEntryOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // optional string key = 1;
+ /**
+ * optional string key = 1;
+ */
+ boolean hasKey();
+ /**
+ * optional string key = 1;
+ */
+ java.lang.String getKey();
+ /**
+ * optional string key = 1;
+ */
+ com.google.protobuf.ByteString
+ getKeyBytes();
+
+ // optional int64 value = 2;
+ /**
+ * optional int64 value = 2;
+ */
+ boolean hasValue();
+ /**
+ * optional int64 value = 2;
+ */
+ long getValue();
+ }
+ /**
+ * Protobuf type {@code MapEntry}
+ */
+ public static final class MapEntry extends
+ com.google.protobuf.GeneratedMessage
+ implements MapEntryOrBuilder {
+ // Use MapEntry.newBuilder() to construct.
+    private MapEntry(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private MapEntry(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final MapEntry defaultInstance;
+ public static MapEntry getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public MapEntry getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private MapEntry(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 10: {
+ bitField0_ |= 0x00000001;
+ key_ = input.readBytes();
+ break;
+ }
+ case 16: {
+ bitField0_ |= 0x00000002;
+ value_ = input.readInt64();
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_MapEntry_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_MapEntry_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntry.class, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntry.Builder.class);
+ }
+
+    public static com.google.protobuf.Parser<MapEntry> PARSER =
+        new com.google.protobuf.AbstractParser<MapEntry>() {
+ public MapEntry parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new MapEntry(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+    public com.google.protobuf.Parser<MapEntry> getParserForType() {
+ return PARSER;
+ }
+
+ private int bitField0_;
+ // optional string key = 1;
+ public static final int KEY_FIELD_NUMBER = 1;
+ private java.lang.Object key_;
+ /**
+ * optional string key = 1;
+ */
+ public boolean hasKey() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * optional string key = 1;
+ */
+ public java.lang.String getKey() {
+ java.lang.Object ref = key_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
+ key_ = s;
+ }
+ return s;
+ }
+ }
+ /**
+ * optional string key = 1;
+ */
+ public com.google.protobuf.ByteString
+ getKeyBytes() {
+ java.lang.Object ref = key_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ key_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ // optional int64 value = 2;
+ public static final int VALUE_FIELD_NUMBER = 2;
+ private long value_;
+ /**
+ * optional int64 value = 2;
+ */
+ public boolean hasValue() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * optional int64 value = 2;
+ */
+ public long getValue() {
+ return value_;
+ }
+
+ private void initFields() {
+ key_ = "";
+ value_ = 0L;
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeBytes(1, getKeyBytes());
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeInt64(2, value_);
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(1, getKeyBytes());
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeInt64Size(2, value_);
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntry)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntry other = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntry) obj;
+
+ boolean result = true;
+ result = result && (hasKey() == other.hasKey());
+ if (hasKey()) {
+ result = result && getKey()
+ .equals(other.getKey());
+ }
+ result = result && (hasValue() == other.hasValue());
+ if (hasValue()) {
+ result = result && (getValue()
+ == other.getValue());
+ }
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ private int memoizedHashCode = 0;
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (hasKey()) {
+ hash = (37 * hash) + KEY_FIELD_NUMBER;
+ hash = (53 * hash) + getKey().hashCode();
+ }
+ if (hasValue()) {
+ hash = (37 * hash) + VALUE_FIELD_NUMBER;
+ hash = (53 * hash) + hashLong(getValue());
+ }
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntry parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntry parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntry parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntry parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntry parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntry parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntry parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntry parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntry parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntry parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntry prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code MapEntry}
+ */
+ public static final class Builder extends
+        com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntryOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_MapEntry_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_MapEntry_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntry.class, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntry.Builder.class);
+ }
+
+ // Construct using org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntry.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ key_ = "";
+ bitField0_ = (bitField0_ & ~0x00000001);
+ value_ = 0L;
+ bitField0_ = (bitField0_ & ~0x00000002);
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_MapEntry_descriptor;
+ }
+
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntry getDefaultInstanceForType() {
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntry.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntry build() {
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntry result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntry buildPartial() {
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntry result = new org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntry(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ result.key_ = key_;
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+ to_bitField0_ |= 0x00000002;
+ }
+ result.value_ = value_;
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntry) {
+ return mergeFrom((org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntry)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntry other) {
+ if (other == org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntry.getDefaultInstance()) return this;
+ if (other.hasKey()) {
+ bitField0_ |= 0x00000001;
+ key_ = other.key_;
+ onChanged();
+ }
+ if (other.hasValue()) {
+ setValue(other.getValue());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntry parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntry) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ private int bitField0_;
+
+ // optional string key = 1;
+ private java.lang.Object key_ = "";
+ /**
+ * optional string key = 1;
+ */
+ public boolean hasKey() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * optional string key = 1;
+ */
+ public java.lang.String getKey() {
+ java.lang.Object ref = key_;
+ if (!(ref instanceof java.lang.String)) {
+ java.lang.String s = ((com.google.protobuf.ByteString) ref)
+ .toStringUtf8();
+ key_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * optional string key = 1;
+ */
+ public com.google.protobuf.ByteString
+ getKeyBytes() {
+ java.lang.Object ref = key_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ key_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * optional string key = 1;
+ */
+ public Builder setKey(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ key_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * optional string key = 1;
+ */
+ public Builder clearKey() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ key_ = getDefaultInstance().getKey();
+ onChanged();
+ return this;
+ }
+ /**
+ * optional string key = 1;
+ */
+ public Builder setKeyBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ key_ = value;
+ onChanged();
+ return this;
+ }
+
+ // optional int64 value = 2;
+ private long value_ ;
+ /**
+ * optional int64 value = 2;
+ */
+ public boolean hasValue() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * optional int64 value = 2;
+ */
+ public long getValue() {
+ return value_;
+ }
+ /**
+ * optional int64 value = 2;
+ */
+ public Builder setValue(long value) {
+ bitField0_ |= 0x00000002;
+ value_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * optional int64 value = 2;
+ */
+ public Builder clearValue() {
+ bitField0_ = (bitField0_ & ~0x00000002);
+ value_ = 0L;
+ onChanged();
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:MapEntry)
+ }
+
+ static {
+ defaultInstance = new MapEntry(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:MapEntry)
+ }
+
+ public interface GetDaemonMetricsRequestProtoOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+ }
+ /**
+ * Protobuf type {@code GetDaemonMetricsRequestProto}
+ */
+ public static final class GetDaemonMetricsRequestProto extends
+ com.google.protobuf.GeneratedMessage
+ implements GetDaemonMetricsRequestProtoOrBuilder {
+ // Use GetDaemonMetricsRequestProto.newBuilder() to construct.
+    private GetDaemonMetricsRequestProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private GetDaemonMetricsRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final GetDaemonMetricsRequestProto defaultInstance;
+ public static GetDaemonMetricsRequestProto getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public GetDaemonMetricsRequestProto getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private GetDaemonMetricsRequestProto(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_GetDaemonMetricsRequestProto_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_GetDaemonMetricsRequestProto_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsRequestProto.class, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsRequestProto.Builder.class);
+ }
+
+    public static com.google.protobuf.Parser<GetDaemonMetricsRequestProto> PARSER =
+        new com.google.protobuf.AbstractParser<GetDaemonMetricsRequestProto>() {
+ public GetDaemonMetricsRequestProto parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new GetDaemonMetricsRequestProto(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+    public com.google.protobuf.Parser<GetDaemonMetricsRequestProto> getParserForType() {
+ return PARSER;
+ }
+
+ private void initFields() {
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsRequestProto)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsRequestProto other = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsRequestProto) obj;
+
+ boolean result = true;
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ private int memoizedHashCode = 0;
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsRequestProto parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsRequestProto parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsRequestProto parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsRequestProto parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsRequestProto parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsRequestProto parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsRequestProto parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsRequestProto parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsRequestProto parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsRequestProto parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsRequestProto prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code GetDaemonMetricsRequestProto}
+ */
+ public static final class Builder extends
+        com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsRequestProtoOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_GetDaemonMetricsRequestProto_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_GetDaemonMetricsRequestProto_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsRequestProto.class, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsRequestProto.Builder.class);
+ }
+
+ // Construct using org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsRequestProto.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_GetDaemonMetricsRequestProto_descriptor;
+ }
+
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsRequestProto getDefaultInstanceForType() {
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsRequestProto.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsRequestProto build() {
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsRequestProto result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsRequestProto buildPartial() {
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsRequestProto result = new org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsRequestProto(this);
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsRequestProto) {
+ return mergeFrom((org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsRequestProto)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsRequestProto other) {
+ if (other == org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsRequestProto.getDefaultInstance()) return this;
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsRequestProto parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsRequestProto) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:GetDaemonMetricsRequestProto)
+ }
+
+ static {
+ defaultInstance = new GetDaemonMetricsRequestProto(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:GetDaemonMetricsRequestProto)
+ }
+
+ public interface GetDaemonMetricsResponseProtoOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // repeated .MapEntry metrics = 1;
+ /**
+ * repeated .MapEntry metrics = 1;
+ */
+    java.util.List<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntry>
+ getMetricsList();
+ /**
+ * repeated .MapEntry metrics = 1;
+ */
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntry getMetrics(int index);
+ /**
+ * repeated .MapEntry metrics = 1;
+ */
+ int getMetricsCount();
+ /**
+ * repeated .MapEntry metrics = 1;
+ */
+    java.util.List<? extends org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntryOrBuilder>
+ getMetricsOrBuilderList();
+ /**
+ * repeated .MapEntry metrics = 1;
+ */
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntryOrBuilder getMetricsOrBuilder(
+ int index);
+ }
+ /**
+ * Protobuf type {@code GetDaemonMetricsResponseProto}
+ */
+ public static final class GetDaemonMetricsResponseProto extends
+ com.google.protobuf.GeneratedMessage
+ implements GetDaemonMetricsResponseProtoOrBuilder {
+ // Use GetDaemonMetricsResponseProto.newBuilder() to construct.
+    private GetDaemonMetricsResponseProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private GetDaemonMetricsResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final GetDaemonMetricsResponseProto defaultInstance;
+ public static GetDaemonMetricsResponseProto getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public GetDaemonMetricsResponseProto getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private GetDaemonMetricsResponseProto(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 10: {
+ if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
+              metrics_ = new java.util.ArrayList<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntry>();
+ mutable_bitField0_ |= 0x00000001;
+ }
+ metrics_.add(input.readMessage(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntry.PARSER, extensionRegistry));
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
+ metrics_ = java.util.Collections.unmodifiableList(metrics_);
+ }
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_GetDaemonMetricsResponseProto_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_GetDaemonMetricsResponseProto_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsResponseProto.class, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsResponseProto.Builder.class);
+ }
+
+    public static com.google.protobuf.Parser<GetDaemonMetricsResponseProto> PARSER =
+        new com.google.protobuf.AbstractParser<GetDaemonMetricsResponseProto>() {
+ public GetDaemonMetricsResponseProto parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new GetDaemonMetricsResponseProto(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+    public com.google.protobuf.Parser<GetDaemonMetricsResponseProto> getParserForType() {
+ return PARSER;
+ }
+
+ // repeated .MapEntry metrics = 1;
+ public static final int METRICS_FIELD_NUMBER = 1;
+    private java.util.List<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntry> metrics_;
+ /**
+ * repeated .MapEntry metrics = 1;
+ */
+    public java.util.List<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntry> getMetricsList() {
+ return metrics_;
+ }
+ /**
+ * repeated .MapEntry metrics = 1;
+ */
+    public java.util.List<? extends org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntryOrBuilder>
+ getMetricsOrBuilderList() {
+ return metrics_;
+ }
+ /**
+ * repeated .MapEntry metrics = 1;
+ */
+ public int getMetricsCount() {
+ return metrics_.size();
+ }
+ /**
+ * repeated .MapEntry metrics = 1;
+ */
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntry getMetrics(int index) {
+ return metrics_.get(index);
+ }
+ /**
+ * repeated .MapEntry metrics = 1;
+ */
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntryOrBuilder getMetricsOrBuilder(
+ int index) {
+ return metrics_.get(index);
+ }
+
+ private void initFields() {
+ metrics_ = java.util.Collections.emptyList();
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ for (int i = 0; i < metrics_.size(); i++) {
+ output.writeMessage(1, metrics_.get(i));
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ for (int i = 0; i < metrics_.size(); i++) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(1, metrics_.get(i));
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsResponseProto)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsResponseProto other = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsResponseProto) obj;
+
+ boolean result = true;
+ result = result && getMetricsList()
+ .equals(other.getMetricsList());
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ private int memoizedHashCode = 0;
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (getMetricsCount() > 0) {
+ hash = (37 * hash) + METRICS_FIELD_NUMBER;
+ hash = (53 * hash) + getMetricsList().hashCode();
+ }
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsResponseProto parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsResponseProto parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsResponseProto parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsResponseProto parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsResponseProto parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsResponseProto parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsResponseProto parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsResponseProto parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsResponseProto parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsResponseProto parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsResponseProto prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code GetDaemonMetricsResponseProto}
+ */
+ public static final class Builder extends
+        com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsResponseProtoOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_GetDaemonMetricsResponseProto_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_GetDaemonMetricsResponseProto_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsResponseProto.class, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsResponseProto.Builder.class);
+ }
+
+ // Construct using org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsResponseProto.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ getMetricsFieldBuilder();
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ if (metricsBuilder_ == null) {
+ metrics_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000001);
+ } else {
+ metricsBuilder_.clear();
+ }
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_GetDaemonMetricsResponseProto_descriptor;
+ }
+
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsResponseProto getDefaultInstanceForType() {
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsResponseProto.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsResponseProto build() {
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsResponseProto result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsResponseProto buildPartial() {
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsResponseProto result = new org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsResponseProto(this);
+ int from_bitField0_ = bitField0_;
+ if (metricsBuilder_ == null) {
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ metrics_ = java.util.Collections.unmodifiableList(metrics_);
+ bitField0_ = (bitField0_ & ~0x00000001);
+ }
+ result.metrics_ = metrics_;
+ } else {
+ result.metrics_ = metricsBuilder_.build();
+ }
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsResponseProto) {
+ return mergeFrom((org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsResponseProto)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsResponseProto other) {
+ if (other == org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsResponseProto.getDefaultInstance()) return this;
+ if (metricsBuilder_ == null) {
+ if (!other.metrics_.isEmpty()) {
+ if (metrics_.isEmpty()) {
+ metrics_ = other.metrics_;
+ bitField0_ = (bitField0_ & ~0x00000001);
+ } else {
+ ensureMetricsIsMutable();
+ metrics_.addAll(other.metrics_);
+ }
+ onChanged();
+ }
+ } else {
+ if (!other.metrics_.isEmpty()) {
+ if (metricsBuilder_.isEmpty()) {
+ metricsBuilder_.dispose();
+ metricsBuilder_ = null;
+ metrics_ = other.metrics_;
+ bitField0_ = (bitField0_ & ~0x00000001);
+ metricsBuilder_ =
+ com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
+ getMetricsFieldBuilder() : null;
+ } else {
+ metricsBuilder_.addAllMessages(other.metrics_);
+ }
+ }
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsResponseProto parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsResponseProto) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ private int bitField0_;
+
+ // repeated .MapEntry metrics = 1;
+      private java.util.List<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntry> metrics_ =
+ java.util.Collections.emptyList();
+ private void ensureMetricsIsMutable() {
+ if (!((bitField0_ & 0x00000001) == 0x00000001)) {
+          metrics_ = new java.util.ArrayList<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntry>(metrics_);
+ bitField0_ |= 0x00000001;
+ }
+ }
+
+ private com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntry, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntry.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntryOrBuilder> metricsBuilder_;
+
+ /**
+ * repeated .MapEntry metrics = 1;
+ */
+      public java.util.List<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntry> getMetricsList() {
+ if (metricsBuilder_ == null) {
+ return java.util.Collections.unmodifiableList(metrics_);
+ } else {
+ return metricsBuilder_.getMessageList();
+ }
+ }
+ /**
+ * repeated .MapEntry metrics = 1;
+ */
+ public int getMetricsCount() {
+ if (metricsBuilder_ == null) {
+ return metrics_.size();
+ } else {
+ return metricsBuilder_.getCount();
+ }
+ }
+ /**
+ * repeated .MapEntry metrics = 1;
+ */
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntry getMetrics(int index) {
+ if (metricsBuilder_ == null) {
+ return metrics_.get(index);
+ } else {
+ return metricsBuilder_.getMessage(index);
+ }
+ }
+ /**
+ * repeated .MapEntry metrics = 1;
+ */
+ public Builder setMetrics(
+ int index, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntry value) {
+ if (metricsBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureMetricsIsMutable();
+ metrics_.set(index, value);
+ onChanged();
+ } else {
+ metricsBuilder_.setMessage(index, value);
+ }
+ return this;
+ }
+ /**
+ * repeated .MapEntry metrics = 1;
+ */
+ public Builder setMetrics(
+ int index, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntry.Builder builderForValue) {
+ if (metricsBuilder_ == null) {
+ ensureMetricsIsMutable();
+ metrics_.set(index, builderForValue.build());
+ onChanged();
+ } else {
+ metricsBuilder_.setMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * repeated .MapEntry metrics = 1;
+ */
+ public Builder addMetrics(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntry value) {
+ if (metricsBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureMetricsIsMutable();
+ metrics_.add(value);
+ onChanged();
+ } else {
+ metricsBuilder_.addMessage(value);
+ }
+ return this;
+ }
+ /**
+ * repeated .MapEntry metrics = 1;
+ */
+ public Builder addMetrics(
+ int index, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntry value) {
+ if (metricsBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureMetricsIsMutable();
+ metrics_.add(index, value);
+ onChanged();
+ } else {
+ metricsBuilder_.addMessage(index, value);
+ }
+ return this;
+ }
+ /**
+ * repeated .MapEntry metrics = 1;
+ */
+ public Builder addMetrics(
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntry.Builder builderForValue) {
+ if (metricsBuilder_ == null) {
+ ensureMetricsIsMutable();
+ metrics_.add(builderForValue.build());
+ onChanged();
+ } else {
+ metricsBuilder_.addMessage(builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * repeated .MapEntry metrics = 1;
+ */
+ public Builder addMetrics(
+ int index, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntry.Builder builderForValue) {
+ if (metricsBuilder_ == null) {
+ ensureMetricsIsMutable();
+ metrics_.add(index, builderForValue.build());
+ onChanged();
+ } else {
+ metricsBuilder_.addMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * repeated .MapEntry metrics = 1;
+ */
+ public Builder addAllMetrics(
+          java.lang.Iterable<? extends org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntry> values) {
+ if (metricsBuilder_ == null) {
+ ensureMetricsIsMutable();
+ super.addAll(values, metrics_);
+ onChanged();
+ } else {
+ metricsBuilder_.addAllMessages(values);
+ }
+ return this;
+ }
+ /**
+ * repeated .MapEntry metrics = 1;
+ */
+ public Builder clearMetrics() {
+ if (metricsBuilder_ == null) {
+ metrics_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000001);
+ onChanged();
+ } else {
+ metricsBuilder_.clear();
+ }
+ return this;
+ }
+ /**
+ * repeated .MapEntry metrics = 1;
+ */
+ public Builder removeMetrics(int index) {
+ if (metricsBuilder_ == null) {
+ ensureMetricsIsMutable();
+ metrics_.remove(index);
+ onChanged();
+ } else {
+ metricsBuilder_.remove(index);
+ }
+ return this;
+ }
+ /**
+ * repeated .MapEntry metrics = 1;
+ */
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntry.Builder getMetricsBuilder(
+ int index) {
+ return getMetricsFieldBuilder().getBuilder(index);
+ }
+ /**
+ * repeated .MapEntry metrics = 1;
+ */
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntryOrBuilder getMetricsOrBuilder(
+ int index) {
+ if (metricsBuilder_ == null) {
+ return metrics_.get(index); } else {
+ return metricsBuilder_.getMessageOrBuilder(index);
+ }
+ }
+ /**
+ * repeated .MapEntry metrics = 1;
+ */
+      public java.util.List<? extends org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntryOrBuilder>
+ getMetricsOrBuilderList() {
+ if (metricsBuilder_ != null) {
+ return metricsBuilder_.getMessageOrBuilderList();
+ } else {
+ return java.util.Collections.unmodifiableList(metrics_);
+ }
+ }
+ /**
+ * repeated .MapEntry metrics = 1;
+ */
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntry.Builder addMetricsBuilder() {
+ return getMetricsFieldBuilder().addBuilder(
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntry.getDefaultInstance());
+ }
+ /**
+ * repeated .MapEntry metrics = 1;
+ */
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntry.Builder addMetricsBuilder(
+ int index) {
+ return getMetricsFieldBuilder().addBuilder(
+ index, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntry.getDefaultInstance());
+ }
+ /**
+ * repeated .MapEntry metrics = 1;
+ */
+      public java.util.List<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntry.Builder>
+ getMetricsBuilderList() {
+ return getMetricsFieldBuilder().getBuilderList();
+ }
+ private com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntry, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntry.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntryOrBuilder>
+ getMetricsFieldBuilder() {
+ if (metricsBuilder_ == null) {
+ metricsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntry, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntry.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntryOrBuilder>(
+ metrics_,
+ ((bitField0_ & 0x00000001) == 0x00000001),
+ getParentForChildren(),
+ isClean());
+ metrics_ = null;
+ }
+ return metricsBuilder_;
+ }
+
+ // @@protoc_insertion_point(builder_scope:GetDaemonMetricsResponseProto)
+ }
+
+ static {
+ defaultInstance = new GetDaemonMetricsResponseProto(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:GetDaemonMetricsResponseProto)
+ }
+
/**
* Protobuf service {@code LlapDaemonProtocol}
*/
@@ -20661,6 +22307,14 @@ public abstract void purgeCache(
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheRequestProto request,
       com.google.protobuf.RpcCallback<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheResponseProto> done);
+ /**
+ * rpc getDaemonMetrics(.GetDaemonMetricsRequestProto) returns (.GetDaemonMetricsResponseProto);
+ */
+ public abstract void getDaemonMetrics(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsRequestProto request,
+        com.google.protobuf.RpcCallback<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsResponseProto> done);
+
}
public static com.google.protobuf.Service newReflectiveService(
@@ -20682,6 +22336,14 @@ public void purgeCache(
impl.purgeCache(controller, request, done);
}
+ @java.lang.Override
+ public void getDaemonMetrics(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsRequestProto request,
+          com.google.protobuf.RpcCallback<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsResponseProto> done) {
+ impl.getDaemonMetrics(controller, request, done);
+ }
+
};
}
@@ -20708,6 +22370,8 @@ public void purgeCache(
return impl.getDelegationToken(controller, (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetTokenRequestProto)request);
case 1:
return impl.purgeCache(controller, (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheRequestProto)request);
+ case 2:
+ return impl.getDaemonMetrics(controller, (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsRequestProto)request);
default:
throw new java.lang.AssertionError("Can't get here.");
}
@@ -20726,6 +22390,8 @@ public void purgeCache(
return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetTokenRequestProto.getDefaultInstance();
case 1:
return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheRequestProto.getDefaultInstance();
+ case 2:
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsRequestProto.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
}
@@ -20744,6 +22410,8 @@ public void purgeCache(
return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetTokenResponseProto.getDefaultInstance();
case 1:
return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheResponseProto.getDefaultInstance();
+ case 2:
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsResponseProto.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
}
@@ -20768,6 +22436,14 @@ public abstract void purgeCache(
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheResponseProto> done);
+ /**
+ * rpc getDaemonMetrics(.GetDaemonMetricsRequestProto) returns (.GetDaemonMetricsResponseProto);
+ */
+ public abstract void getDaemonMetrics(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsRequestProto request,
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsResponseProto> done);
+
public static final
com.google.protobuf.Descriptors.ServiceDescriptor
getDescriptor() {
@@ -20800,6 +22476,11 @@ public final void callMethod(
com.google.protobuf.RpcUtil.specializeCallback(
done));
return;
+ case 2:
+ this.getDaemonMetrics(controller, (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsRequestProto)request,
+ com.google.protobuf.RpcUtil.specializeCallback(
+ done));
+ return;
default:
throw new java.lang.AssertionError("Can't get here.");
}
@@ -20818,6 +22499,8 @@ public final void callMethod(
return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetTokenRequestProto.getDefaultInstance();
case 1:
return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheRequestProto.getDefaultInstance();
+ case 2:
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsRequestProto.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
}
@@ -20836,6 +22519,8 @@ public final void callMethod(
return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetTokenResponseProto.getDefaultInstance();
case 1:
return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheResponseProto.getDefaultInstance();
+ case 2:
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsResponseProto.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
}
@@ -20886,6 +22571,21 @@ public void purgeCache(
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheResponseProto.class,
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheResponseProto.getDefaultInstance()));
}
+
+ public void getDaemonMetrics(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsRequestProto request,
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsResponseProto> done) {
+ channel.callMethod(
+ getDescriptor().getMethods().get(2),
+ controller,
+ request,
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsResponseProto.getDefaultInstance(),
+ com.google.protobuf.RpcUtil.generalizeCallback(
+ done,
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsResponseProto.class,
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsResponseProto.getDefaultInstance()));
+ }
}
public static BlockingInterface newBlockingStub(
@@ -20903,6 +22603,11 @@ public static BlockingInterface newBlockingStub(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheRequestProto request)
throws com.google.protobuf.ServiceException;
+
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsResponseProto getDaemonMetrics(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsRequestProto request)
+ throws com.google.protobuf.ServiceException;
}
private static final class BlockingStub implements BlockingInterface {
@@ -20935,6 +22640,18 @@ private BlockingStub(com.google.protobuf.BlockingRpcChannel channel) {
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheResponseProto.getDefaultInstance());
}
+
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsResponseProto getDaemonMetrics(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsRequestProto request)
+ throws com.google.protobuf.ServiceException {
+ return (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsResponseProto) channel.callBlockingMethod(
+ getDescriptor().getMethods().get(2),
+ controller,
+ request,
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsResponseProto.getDefaultInstance());
+ }
+
}
// @@protoc_insertion_point(class_scope:LlapManagementProtocol)
@@ -21070,6 +22787,21 @@ private BlockingStub(com.google.protobuf.BlockingRpcChannel channel) {
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_PurgeCacheResponseProto_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_MapEntry_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_MapEntry_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_GetDaemonMetricsRequestProto_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_GetDaemonMetricsRequestProto_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_GetDaemonMetricsResponseProto_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_GetDaemonMetricsResponseProto_fieldAccessorTable;
public static com.google.protobuf.Descriptors.FileDescriptor
getDescriptor() {
@@ -21152,28 +22884,33 @@ private BlockingStub(com.google.protobuf.BlockingRpcChannel channel) {
"tputSocketInitMessage\022\023\n\013fragment_id\030\001 \002" +
"(\t\022\r\n\005token\030\002 \001(\014\"\030\n\026PurgeCacheRequestPr" +
"oto\"6\n\027PurgeCacheResponseProto\022\033\n\023purged" +
- "_memory_bytes\030\001 \001(\003*2\n\020SourceStateProto\022" +
- "\017\n\013S_SUCCEEDED\020\001\022\r\n\tS_RUNNING\020\002*E\n\024Submi" +
- "ssionStateProto\022\014\n\010ACCEPTED\020\001\022\014\n\010REJECTE" +
- "D\020\002\022\021\n\rEVICTED_OTHER\020\0032\337\003\n\022LlapDaemonPro" +
- "tocol\022B\n\013registerDag\022\030.RegisterDagReques" +
- "tProto\032\031.RegisterDagResponseProto\022?\n\nsub" +
- "mitWork\022\027.SubmitWorkRequestProto\032\030.Submi",
- "tWorkResponseProto\022W\n\022sourceStateUpdated" +
- "\022\037.SourceStateUpdatedRequestProto\032 .Sour" +
- "ceStateUpdatedResponseProto\022H\n\rqueryComp" +
- "lete\022\032.QueryCompleteRequestProto\032\033.Query" +
- "CompleteResponseProto\022T\n\021terminateFragme" +
- "nt\022\036.TerminateFragmentRequestProto\032\037.Ter" +
- "minateFragmentResponseProto\022K\n\016updateFra" +
- "gment\022\033.UpdateFragmentRequestProto\032\034.Upd" +
- "ateFragmentResponseProto2\236\001\n\026LlapManagem" +
- "entProtocol\022C\n\022getDelegationToken\022\025.GetT",
- "okenRequestProto\032\026.GetTokenResponseProto" +
- "\022?\n\npurgeCache\022\027.PurgeCacheRequestProto\032" +
- "\030.PurgeCacheResponseProtoBH\n&org.apache." +
- "hadoop.hive.llap.daemon.rpcB\030LlapDaemonP" +
- "rotocolProtos\210\001\001\240\001\001"
+ "_memory_bytes\030\001 \001(\003\"&\n\010MapEntry\022\013\n\003key\030\001" +
+ " \001(\t\022\r\n\005value\030\002 \001(\003\"\036\n\034GetDaemonMetricsR" +
+ "equestProto\";\n\035GetDaemonMetricsResponseP" +
+ "roto\022\032\n\007metrics\030\001 \003(\0132\t.MapEntry*2\n\020Sour" +
+ "ceStateProto\022\017\n\013S_SUCCEEDED\020\001\022\r\n\tS_RUNNI" +
+ "NG\020\002*E\n\024SubmissionStateProto\022\014\n\010ACCEPTED" +
+ "\020\001\022\014\n\010REJECTED\020\002\022\021\n\rEVICTED_OTHER\020\0032\337\003\n\022",
+ "LlapDaemonProtocol\022B\n\013registerDag\022\030.Regi" +
+ "sterDagRequestProto\032\031.RegisterDagRespons" +
+ "eProto\022?\n\nsubmitWork\022\027.SubmitWorkRequest" +
+ "Proto\032\030.SubmitWorkResponseProto\022W\n\022sourc" +
+ "eStateUpdated\022\037.SourceStateUpdatedReques" +
+ "tProto\032 .SourceStateUpdatedResponseProto" +
+ "\022H\n\rqueryComplete\022\032.QueryCompleteRequest" +
+ "Proto\032\033.QueryCompleteResponseProto\022T\n\021te" +
+ "rminateFragment\022\036.TerminateFragmentReque" +
+ "stProto\032\037.TerminateFragmentResponseProto",
+ "\022K\n\016updateFragment\022\033.UpdateFragmentReque" +
+ "stProto\032\034.UpdateFragmentResponseProto2\361\001" +
+ "\n\026LlapManagementProtocol\022C\n\022getDelegatio" +
+ "nToken\022\025.GetTokenRequestProto\032\026.GetToken" +
+ "ResponseProto\022?\n\npurgeCache\022\027.PurgeCache" +
+ "RequestProto\032\030.PurgeCacheResponseProto\022Q" +
+ "\n\020getDaemonMetrics\022\035.GetDaemonMetricsReq" +
+ "uestProto\032\036.GetDaemonMetricsResponseProt" +
+ "oBH\n&org.apache.hadoop.hive.llap.daemon." +
+ "rpcB\030LlapDaemonProtocolProtos\210\001\001\240\001\001"
};
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -21336,6 +23073,24 @@ private BlockingStub(com.google.protobuf.BlockingRpcChannel channel) {
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_PurgeCacheResponseProto_descriptor,
new java.lang.String[] { "PurgedMemoryBytes", });
+ internal_static_MapEntry_descriptor =
+ getDescriptor().getMessageTypes().get(26);
+ internal_static_MapEntry_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_MapEntry_descriptor,
+ new java.lang.String[] { "Key", "Value", });
+ internal_static_GetDaemonMetricsRequestProto_descriptor =
+ getDescriptor().getMessageTypes().get(27);
+ internal_static_GetDaemonMetricsRequestProto_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_GetDaemonMetricsRequestProto_descriptor,
+ new java.lang.String[] { });
+ internal_static_GetDaemonMetricsResponseProto_descriptor =
+ getDescriptor().getMessageTypes().get(28);
+ internal_static_GetDaemonMetricsResponseProto_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_GetDaemonMetricsResponseProto_descriptor,
+ new java.lang.String[] { "Metrics", });
return null;
}
};
diff --git a/llap-common/src/java/org/apache/hadoop/hive/llap/impl/LlapManagementProtocolClientImpl.java b/llap-common/src/java/org/apache/hadoop/hive/llap/impl/LlapManagementProtocolClientImpl.java
index 2caae827f6..05722cd92f 100644
--- a/llap-common/src/java/org/apache/hadoop/hive/llap/impl/LlapManagementProtocolClientImpl.java
+++ b/llap-common/src/java/org/apache/hadoop/hive/llap/impl/LlapManagementProtocolClientImpl.java
@@ -89,4 +89,14 @@ public GetTokenResponseProto getDelegationToken(RpcController controller,
throw new ServiceException(e);
}
}
+
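+ // Delegates to the RPC proxy; IOExceptions are wrapped in ServiceException,
+ // matching the error handling of the other client methods in this class.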
+ @Override
+ public LlapDaemonProtocolProtos.GetDaemonMetricsResponseProto getDaemonMetrics(final RpcController controller,
+ final LlapDaemonProtocolProtos.GetDaemonMetricsRequestProto request) throws ServiceException {
+ try {
+ return getProxy().getDaemonMetrics(null, request);
+ } catch (IOException e) {
+ throw new ServiceException(e);
+ }
+ }
}
diff --git a/llap-common/src/protobuf/LlapDaemonProtocol.proto b/llap-common/src/protobuf/LlapDaemonProtocol.proto
index 3aeacb2da5..91dc368b6c 100644
--- a/llap-common/src/protobuf/LlapDaemonProtocol.proto
+++ b/llap-common/src/protobuf/LlapDaemonProtocol.proto
@@ -211,6 +211,18 @@ message PurgeCacheResponseProto {
optional int64 purged_memory_bytes = 1;
}
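+// Simple key/value pair used to return metrics as a repeated field in place of a map.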
+message MapEntry {
+ optional string key = 1;
+ optional int64 value = 2;
+}
+
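+// Empty request message: getDaemonMetrics takes no arguments.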
+message GetDaemonMetricsRequestProto {
+}
+
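+// Response: one MapEntry per reported metric (metric name -> numeric value).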
+message GetDaemonMetricsResponseProto {
+ repeated MapEntry metrics = 1;
+}
+
service LlapDaemonProtocol {
rpc registerDag(RegisterDagRequestProto) returns (RegisterDagResponseProto);
rpc submitWork(SubmitWorkRequestProto) returns (SubmitWorkResponseProto);
@@ -223,4 +235,5 @@ service LlapDaemonProtocol {
service LlapManagementProtocol {
rpc getDelegationToken(GetTokenRequestProto) returns (GetTokenResponseProto);
rpc purgeCache(PurgeCacheRequestProto) returns (PurgeCacheResponseProto);
+ rpc getDaemonMetrics(GetDaemonMetricsRequestProto) returns (GetDaemonMetricsResponseProto);
}
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemon.java b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemon.java
index 14dc6326f2..76be2efa81 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemon.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemon.java
@@ -285,7 +285,7 @@ public LlapDaemon(Configuration daemonConf, int numExecutors, long executorMemor
}
this.secretManager = sm;
this.server = new LlapProtocolServerImpl(secretManager,
- numHandlers, this, srvAddress, mngAddress, srvPort, mngPort, daemonId);
+ numHandlers, this, srvAddress, mngAddress, srvPort, mngPort, daemonId, metrics);
UgiFactory fsUgiFactory = null;
try {
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapProtocolServerImpl.java b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapProtocolServerImpl.java
index c3a95c8b30..e508c5f128 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapProtocolServerImpl.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapProtocolServerImpl.java
@@ -17,6 +17,8 @@
import java.io.IOException;
import java.net.InetSocketAddress;
import java.security.PrivilegedAction;
+import java.util.HashMap;
+import java.util.Map;
import java.util.concurrent.atomic.AtomicReference;
import com.google.common.io.ByteArrayDataOutput;
@@ -28,6 +30,12 @@
import org.apache.hadoop.hive.llap.io.api.LlapIo;
import org.apache.hadoop.hive.llap.io.api.LlapProxy;
+import org.apache.hadoop.hive.llap.metrics.LlapDaemonExecutorMetrics;
+import org.apache.hadoop.metrics2.AbstractMetric;
+import org.apache.hadoop.metrics2.MetricsCollector;
+import org.apache.hadoop.metrics2.MetricsInfo;
+import org.apache.hadoop.metrics2.MetricsRecordBuilder;
+import org.apache.hadoop.metrics2.MetricsTag;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.classification.InterfaceAudience;
@@ -79,11 +87,13 @@
private String clusterUser = null;
private boolean isRestrictedToClusterUser = false;
private final DaemonId daemonId;
+ private final LlapDaemonExecutorMetrics executorMetrics;
private TokenRequiresSigning isSigningRequiredConfig = TokenRequiresSigning.TRUE;
public LlapProtocolServerImpl(SecretManager secretManager, int numHandlers,
ContainerRunner containerRunner, AtomicReference<InetSocketAddress> srvAddress,
- AtomicReference<InetSocketAddress> mngAddress, int srvPort, int mngPort, DaemonId daemonId) {
+ AtomicReference<InetSocketAddress> mngAddress, int srvPort, int mngPort, DaemonId daemonId,
+ LlapDaemonExecutorMetrics executorMetrics) {
super("LlapDaemonProtocolServerImpl");
this.numHandlers = numHandlers;
this.containerRunner = containerRunner;
@@ -93,6 +103,7 @@ public LlapProtocolServerImpl(SecretManager secretManager, int numHandlers,
this.mngAddress = mngAddress;
this.mngPort = mngPort;
this.daemonId = daemonId;
+ this.executorMetrics = executorMetrics;
LOG.info("Creating: " + LlapProtocolServerImpl.class.getSimpleName() +
" with port configured to: " + srvPort);
}
@@ -302,6 +313,21 @@ public GetTokenResponseProto getDelegationToken(RpcController controller,
return responseProtoBuilder.build();
}
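+ // Takes a point-in-time snapshot of the executor metrics (when available)
+ // and serializes it as repeated MapEntry key/value pairs.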
+ @Override
+ public LlapDaemonProtocolProtos.GetDaemonMetricsResponseProto getDaemonMetrics(final RpcController controller,
+ final LlapDaemonProtocolProtos.GetDaemonMetricsRequestProto request) throws ServiceException {
+ LlapDaemonProtocolProtos.GetDaemonMetricsResponseProto.Builder responseProtoBuilder =
+ LlapDaemonProtocolProtos.GetDaemonMetricsResponseProto.newBuilder();
+ if (executorMetrics != null) {
+ Map<String, Long> data = new HashMap<>();
+ DumpingMetricsCollector dmc = new DumpingMetricsCollector(data);
+ executorMetrics.getMetrics(dmc, true);
+ data.forEach((key, value) -> responseProtoBuilder.addMetrics(
+ LlapDaemonProtocolProtos.MapEntry.newBuilder().setKey(key).setValue(value).build()));
+ }
+ return responseProtoBuilder.build();
+ }
+
private boolean determineIfSigningIsRequired(UserGroupInformation callingUser) {
switch (isSigningRequiredConfig) {
case FALSE: return false;
@@ -313,4 +339,91 @@ private boolean determineIfSigningIsRequired(UserGroupInformation callingUser) {
default: throw new AssertionError("Unknown value " + isSigningRequiredConfig);
}
}
+
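+ // Record builder that dumps every counter and gauge into a plain map keyed
+ // by metric name; floating-point gauges are rounded to longs, and tags and
+ // context information are dropped.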
+ private final class DumpingMetricsRecordBuilder extends MetricsRecordBuilder {
+ private Map<String, Long> data;
+
+ private DumpingMetricsRecordBuilder(Map<String, Long> data) {
+ this.data = data;
+ }
+
+ @Override
+ public MetricsCollector parent() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public MetricsRecordBuilder tag(MetricsInfo metricsInfo, String s) {
+ return this;
+ }
+
+ @Override
+ public MetricsRecordBuilder add(MetricsTag metricsTag) {
+ return this;
+ }
+
+ @Override
+ public MetricsRecordBuilder add(AbstractMetric abstractMetric) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public MetricsRecordBuilder setContext(String s) {
+ return this;
+ }
+
+ @Override
+ public MetricsRecordBuilder addCounter(MetricsInfo metricsInfo, int i) {
+ data.put(metricsInfo.name(), Long.valueOf(i));
+ return this;
+ }
+
+ @Override
+ public MetricsRecordBuilder addCounter(MetricsInfo metricsInfo, long l) {
+ data.put(metricsInfo.name(), l);
+ return this;
+ }
+
+ @Override
+ public MetricsRecordBuilder addGauge(MetricsInfo metricsInfo, int i) {
+ data.put(metricsInfo.name(), Long.valueOf(i));
+ return this;
+ }
+
+ @Override
+ public MetricsRecordBuilder addGauge(MetricsInfo metricsInfo, long l) {
+ data.put(metricsInfo.name(), l);
+ return this;
+ }
+
+ @Override
+ public MetricsRecordBuilder addGauge(MetricsInfo metricsInfo, float v) {
+ data.put(metricsInfo.name(), Long.valueOf(Math.round(v)));
+ return this;
+ }
+
+ @Override
+ public MetricsRecordBuilder addGauge(MetricsInfo metricsInfo, double v) {
+ data.put(metricsInfo.name(), Math.round(v));
+ return this;
+ }
+ }
+
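+ // Collector that routes every record into the single dumping builder above.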
+ private final class DumpingMetricsCollector implements MetricsCollector {
+ private MetricsRecordBuilder mrb;
+
DumpingMetricsCollector(Map<String, Long> data) {
+ mrb = new DumpingMetricsRecordBuilder(data);
+ }
+
+ @Override
+ public MetricsRecordBuilder addRecord(String s) {
+ return mrb;
+ }
+
+ @Override
+ public MetricsRecordBuilder addRecord(MetricsInfo metricsInfo) {
+ return mrb;
+ }
+ }
}
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/metrics/LlapDaemonExecutorMetrics.java b/llap-server/src/java/org/apache/hadoop/hive/llap/metrics/LlapDaemonExecutorMetrics.java
index 5129b93249..077a4c9965 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/metrics/LlapDaemonExecutorMetrics.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/metrics/LlapDaemonExecutorMetrics.java
@@ -397,4 +397,12 @@ public JvmMetrics getJvmMetrics() {
public String getName() {
return name;
}
+
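+ // Read-only accessors for the executor availability and wait queue gauges.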
+ public int getNumExecutorsAvailable() {
+ return numExecutorsAvailable.value();
+ }
+
+ public int getWaitQueueSize() {
+ return waitQueueSize.value();
+ }
}
diff --git a/llap-server/src/test/org/apache/hadoop/hive/llap/daemon/impl/TestLlapDaemonProtocolServerImpl.java b/llap-server/src/test/org/apache/hadoop/hive/llap/daemon/impl/TestLlapDaemonProtocolServerImpl.java
index 719518e487..bba7ebea75 100644
--- a/llap-server/src/test/org/apache/hadoop/hive/llap/daemon/impl/TestLlapDaemonProtocolServerImpl.java
+++ b/llap-server/src/test/org/apache/hadoop/hive/llap/daemon/impl/TestLlapDaemonProtocolServerImpl.java
@@ -15,10 +15,13 @@
package org.apache.hadoop.hive.llap.daemon.impl;
import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
import static org.mockito.Mockito.*;
import java.io.IOException;
import java.net.InetSocketAddress;
+import java.util.HashMap;
+import java.util.Map;
import java.util.concurrent.atomic.AtomicReference;
import com.google.protobuf.ServiceException;
@@ -27,10 +30,15 @@
import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
import org.apache.hadoop.hive.llap.configuration.LlapDaemonConfiguration;
import org.apache.hadoop.hive.llap.daemon.ContainerRunner;
+import org.apache.hadoop.hive.llap.impl.LlapManagementProtocolClientImpl;
+import org.apache.hadoop.hive.llap.metrics.LlapDaemonExecutorMetrics;
+import org.apache.hadoop.hive.llap.protocol.LlapManagementProtocolPB;
import org.apache.hadoop.hive.llap.protocol.LlapProtocolBlockingPB;
import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto;
import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkResponseProto;
import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmissionStateProto;
+import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsRequestProto;
+import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsResponseProto;
import org.apache.hadoop.hive.llap.impl.LlapProtocolClientImpl;
import org.junit.Test;
@@ -45,7 +53,7 @@ public void testSimpleCall() throws ServiceException, IOException {
LlapProtocolServerImpl server =
new LlapProtocolServerImpl(null, numHandlers, containerRunnerMock,
new AtomicReference<InetSocketAddress>(), new AtomicReference<InetSocketAddress>(),
- 0, 0, null);
+ 0, 0, null, null);
when(containerRunnerMock.submitWork(any(SubmitWorkRequestProto.class))).thenReturn(
SubmitWorkResponseProto
.newBuilder()
@@ -70,4 +78,71 @@ public void testSimpleCall() throws ServiceException, IOException {
server.stop();
}
}
+
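+ // Sets a distinct value on each executor metric, fetches the metrics over the
+ // management protocol, and verifies every value round-trips unchanged.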
+ @Test(timeout = 10000)
+ public void testGetDaemonMetrics() throws ServiceException, IOException {
+ LlapDaemonConfiguration daemonConf = new LlapDaemonConfiguration();
+ int numHandlers = HiveConf.getIntVar(daemonConf, ConfVars.LLAP_DAEMON_RPC_NUM_HANDLERS);
+ LlapDaemonExecutorMetrics executorMetrics =
+ LlapDaemonExecutorMetrics.create("LLAP", "SessionId", numHandlers, new int[] {30, 60, 300});
+ LlapProtocolServerImpl server =
+ new LlapProtocolServerImpl(null, numHandlers, null,
+ new AtomicReference<InetSocketAddress>(), new AtomicReference<InetSocketAddress>(),
+ 0, 0, null, executorMetrics);
+ executorMetrics.addMetricsFallOffFailedTimeLost(10);
+ executorMetrics.addMetricsFallOffKilledTimeLost(11);
+ executorMetrics.addMetricsFallOffSuccessTimeLost(12);
+ executorMetrics.addMetricsPreemptionTimeLost(13);
+ executorMetrics.addMetricsPreemptionTimeToKill(14);
+ executorMetrics.incrExecutorTotalExecutionFailed();
+ executorMetrics.incrExecutorTotalKilled();
+ executorMetrics.incrExecutorTotalRequestsHandled();
+ executorMetrics.incrExecutorTotalSuccess();
+ executorMetrics.incrTotalEvictedFromWaitQueue();
+ executorMetrics.incrTotalRejectedRequests();
+ executorMetrics.setCacheMemoryPerInstance(15);
+ executorMetrics.setExecutorNumPreemptableRequests(16);
+ executorMetrics.setExecutorNumQueuedRequests(17);
+ executorMetrics.setJvmMaxMemory(18);
+ executorMetrics.setMemoryPerInstance(19);
+ executorMetrics.setNumExecutorsAvailable(20);
+ executorMetrics.setWaitQueueSize(21);
+
+ try {
+ server.init(new Configuration());
+ server.start();
+ InetSocketAddress serverAddr = server.getManagementBindAddress();
+
+ LlapManagementProtocolPB client =
+ new LlapManagementProtocolClientImpl(new Configuration(), serverAddr.getHostName(),
+ serverAddr.getPort(), null, null);
+ GetDaemonMetricsResponseProto responseProto = client.getDaemonMetrics(null,
+ GetDaemonMetricsRequestProto.newBuilder().build());
+ Map<String, Long> result = new HashMap<>(responseProto.getMetricsCount());
+ responseProto.getMetricsList().forEach(me -> result.put(me.getKey(), me.getValue()));
+
+ assertTrue("Checking ExecutorFallOffFailedTimeLost", result.get("ExecutorFallOffFailedTimeLost") == 10);
+ assertTrue("Checking ExecutorFallOffKilledTimeLost", result.get("ExecutorFallOffKilledTimeLost") == 11);
+ assertTrue("Checking ExecutorFallOffSuccessTimeLost", result.get("ExecutorFallOffSuccessTimeLost") == 12);
+ assertTrue("Checking ExecutorTotalPreemptionTimeLost", result.get("ExecutorTotalPreemptionTimeLost") == 13);
+ assertTrue("Checking ExecutorTotalPreemptionTimeToKill", result.get("ExecutorTotalPreemptionTimeToKill") == 14);
+ assertTrue("Checking ExecutorTotalFailed", result.get("ExecutorTotalFailed") == 1);
+ assertTrue("Checking ExecutorTotalKilled", result.get("ExecutorTotalKilled") == 1);
+ assertTrue("Checking ExecutorTotalRequestsHandled", result.get("ExecutorTotalRequestsHandled") == 1);
+ assertTrue("Checking ExecutorTotalSuccess", result.get("ExecutorTotalSuccess") == 1);
+ assertTrue("Checking ExecutorTotalEvictedFromWaitQueue", result.get("ExecutorTotalEvictedFromWaitQueue") == 1);
+ assertTrue("Checking ExecutorTotalRejectedRequests", result.get("ExecutorTotalRejectedRequests") == 1);
+ assertTrue("Checking ExecutorCacheMemoryPerInstance", result.get("ExecutorCacheMemoryPerInstance") == 15);
+ assertTrue("Checking ExecutorNumPreemptableRequests", result.get("ExecutorNumPreemptableRequests") == 16);
+ assertTrue("Checking ExecutorNumQueuedRequests", result.get("ExecutorNumQueuedRequests") == 17);
+ assertTrue("Checking ExecutorJvmMaxMemory", result.get("ExecutorJvmMaxMemory") == 18);
+ assertTrue("Checking ExecutorMemoryPerInstance", result.get("ExecutorMemoryPerInstance") == 19);
+ assertTrue("Checking ExecutorNumExecutorsAvailable", result.get("ExecutorNumExecutorsAvailable") == 20);
+ assertTrue("Checking ExecutorWaitQueueSize", result.get("ExecutorWaitQueueSize") == 21);
+
+ } finally {
+ server.stop();
+ }
+ }
+
}