diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index 93d6ff670210abf0dc1b4318431f16c6bf375a2f..4176339a6b8e57e69edb30140a42b2a8ba396fc7 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -4206,6 +4206,12 @@ private static void populateLlapDaemonVarsSet(Set<String> llapDaemonVarsSetLocal
     LLAP_IO_CVB_BUFFERED_SIZE("hive.llap.io.cvb.memory.consumption.", 1L << 30,
         "The amount of bytes used to buffer CVB between IO and Processor Threads default to 1GB, " +
         "this will be used to compute a best effort queue size for VRBs produced by a LLAP IO thread."),
+    LLAP_IO_PROACTIVE_EVICTION_ENABLED("hive.llap.io.proactive.eviction.enabled", true,
+        "If true, proactive cache eviction is enabled: LLAP will proactively evict buffers" +
+        " that belong to dropped Hive entities (DBs, tables, partitions, or temp tables)."),
+    LLAP_IO_PROACTIVE_EVICTION_ASYNC("hive.llap.io.proactive.eviction.async", true,
+        "If true, proactive cache eviction is run on a separate thread. Setting this to false " +
+        "might be useful in testing scenarios only."),
     LLAP_IO_SHARE_OBJECT_POOLS("hive.llap.io.share.object.pools", false,
         "Whether to used shared object pools in LLAP IO. A safety flag."),
     LLAP_AUTO_ALLOW_UBER("hive.llap.auto.allow.uber", false,
diff --git a/llap-client/src/java/org/apache/hadoop/hive/llap/io/api/LlapIo.java b/llap-client/src/java/org/apache/hadoop/hive/llap/io/api/LlapIo.java
index e5c4a00f3b01c2e5e368129cf10833f3b100e05f..a2f1ad704f6a5ee1ae329e9a0bee4961fc65c4c1 100644
--- a/llap-client/src/java/org/apache/hadoop/hive/llap/io/api/LlapIo.java
+++ b/llap-client/src/java/org/apache/hadoop/hive/llap/io/api/LlapIo.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.hive.llap.io.api;
 
+import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos;
 import org.apache.hadoop.hive.serde2.Deserializer;
 import org.apache.hadoop.io.NullWritable;
 import org.apache.hadoop.mapred.InputFormat;
@@ -33,5 +34,13 @@
    * called when the system is idle.
    */
   long purge();
+
+  /**
+   * Handles a request to evict entities specified in the request object.
+   * @param request lists Hive entities (DB, table, etc.) whose LLAP buffers should be evicted.
+   * @return true if the request was acknowledged (execution might be async), false otherwise.
+ */ + boolean evictEntity(LlapDaemonProtocolProtos.EvictEntityRequestProto request); + void initCacheOnlyInputFormat(InputFormat inputFormat); } diff --git a/llap-common/src/gen/protobuf/gen-java/org/apache/hadoop/hive/llap/daemon/rpc/LlapDaemonProtocolProtos.java b/llap-common/src/gen/protobuf/gen-java/org/apache/hadoop/hive/llap/daemon/rpc/LlapDaemonProtocolProtos.java index cb383ed5f8a8c641946137494113384d8f999ea9..291bba8193cfe9257c6d90f672ed3572d5ced929 100644 --- a/llap-common/src/gen/protobuf/gen-java/org/apache/hadoop/hive/llap/daemon/rpc/LlapDaemonProtocolProtos.java +++ b/llap-common/src/gen/protobuf/gen-java/org/apache/hadoop/hive/llap/daemon/rpc/LlapDaemonProtocolProtos.java @@ -22558,6 +22558,2244 @@ public Builder mergeFrom( // @@protoc_insertion_point(class_scope:SetCapacityResponseProto) } + public interface EvictEntityRequestProtoOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required string db_name = 1; + /** + * required string db_name = 1; + */ + boolean hasDbName(); + /** + * required string db_name = 1; + */ + java.lang.String getDbName(); + /** + * required string db_name = 1; + */ + com.google.protobuf.ByteString + getDbNameBytes(); + + // repeated .TableProto table = 2; + /** + * repeated .TableProto table = 2; + */ + java.util.List + getTableList(); + /** + * repeated .TableProto table = 2; + */ + org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto getTable(int index); + /** + * repeated .TableProto table = 2; + */ + int getTableCount(); + /** + * repeated .TableProto table = 2; + */ + java.util.List + getTableOrBuilderList(); + /** + * repeated .TableProto table = 2; + */ + org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProtoOrBuilder getTableOrBuilder( + int index); + } + /** + * Protobuf type {@code EvictEntityRequestProto} + */ + public static final class EvictEntityRequestProto extends + com.google.protobuf.GeneratedMessage + implements EvictEntityRequestProtoOrBuilder { + // Use EvictEntityRequestProto.newBuilder() to construct. 
+ private EvictEntityRequestProto(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private EvictEntityRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final EvictEntityRequestProto defaultInstance; + public static EvictEntityRequestProto getDefaultInstance() { + return defaultInstance; + } + + public EvictEntityRequestProto getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private EvictEntityRequestProto( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + dbName_ = input.readBytes(); + break; + } + case 18: { + if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + table_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000002; + } + table_.add(input.readMessage(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto.PARSER, extensionRegistry)); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + table_ = java.util.Collections.unmodifiableList(table_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_EvictEntityRequestProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_EvictEntityRequestProto_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProto.class, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProto.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public EvictEntityRequestProto parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new EvictEntityRequestProto(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required string db_name = 1; + public static final int DB_NAME_FIELD_NUMBER = 1; + private 
java.lang.Object dbName_; + /** + * required string db_name = 1; + */ + public boolean hasDbName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string db_name = 1; + */ + public java.lang.String getDbName() { + java.lang.Object ref = dbName_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + dbName_ = s; + } + return s; + } + } + /** + * required string db_name = 1; + */ + public com.google.protobuf.ByteString + getDbNameBytes() { + java.lang.Object ref = dbName_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + dbName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // repeated .TableProto table = 2; + public static final int TABLE_FIELD_NUMBER = 2; + private java.util.List table_; + /** + * repeated .TableProto table = 2; + */ + public java.util.List getTableList() { + return table_; + } + /** + * repeated .TableProto table = 2; + */ + public java.util.List + getTableOrBuilderList() { + return table_; + } + /** + * repeated .TableProto table = 2; + */ + public int getTableCount() { + return table_.size(); + } + /** + * repeated .TableProto table = 2; + */ + public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto getTable(int index) { + return table_.get(index); + } + /** + * repeated .TableProto table = 2; + */ + public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProtoOrBuilder getTableOrBuilder( + int index) { + return table_.get(index); + } + + private void initFields() { + dbName_ = ""; + table_ = java.util.Collections.emptyList(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasDbName()) { + memoizedIsInitialized = 0; + return false; + } + for (int i = 0; i < getTableCount(); i++) { + if (!getTable(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getDbNameBytes()); + } + for (int i = 0; i < table_.size(); i++) { + output.writeMessage(2, table_.get(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getDbNameBytes()); + } + for (int i = 0; i < table_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, table_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProto)) { + return super.equals(obj); + } + org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProto other = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProto) obj; + + boolean result = true; + result = result && (hasDbName() == other.hasDbName()); + if (hasDbName()) { + result = result && getDbName() + .equals(other.getDbName()); + } + result = result && getTableList() + .equals(other.getTableList()); + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasDbName()) { + hash = (37 * hash) + DB_NAME_FIELD_NUMBER; + hash = (53 * hash) + getDbName().hashCode(); + } + if (getTableCount() > 0) { + hash = (37 * hash) + TABLE_FIELD_NUMBER; + hash = (53 * hash) + getTableList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProto parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProto parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProto parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProto parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProto parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProto parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProto parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProto parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProto parseFrom( + com.google.protobuf.CodedInputStream input) 
+ throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProto parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProto prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code EvictEntityRequestProto} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProtoOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_EvictEntityRequestProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_EvictEntityRequestProto_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProto.class, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProto.Builder.class); + } + + // Construct using org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProto.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getTableFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + dbName_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + if (tableBuilder_ == null) { + table_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + } else { + tableBuilder_.clear(); + } + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_EvictEntityRequestProto_descriptor; + } + + public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProto getDefaultInstanceForType() { + return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProto.getDefaultInstance(); + } + + public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProto build() { + org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProto result = buildPartial(); + if (!result.isInitialized()) { + throw 
newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProto buildPartial() { + org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProto result = new org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProto(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.dbName_ = dbName_; + if (tableBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002)) { + table_ = java.util.Collections.unmodifiableList(table_); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.table_ = table_; + } else { + result.table_ = tableBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProto) { + return mergeFrom((org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProto)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProto other) { + if (other == org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProto.getDefaultInstance()) return this; + if (other.hasDbName()) { + bitField0_ |= 0x00000001; + dbName_ = other.dbName_; + onChanged(); + } + if (tableBuilder_ == null) { + if (!other.table_.isEmpty()) { + if (table_.isEmpty()) { + table_ = other.table_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensureTableIsMutable(); + table_.addAll(other.table_); + } + onChanged(); + } + } else { + if (!other.table_.isEmpty()) { + if (tableBuilder_.isEmpty()) { + tableBuilder_.dispose(); + tableBuilder_ = null; + table_ = other.table_; + bitField0_ = (bitField0_ & ~0x00000002); + tableBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
+ getTableFieldBuilder() : null; + } else { + tableBuilder_.addAllMessages(other.table_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasDbName()) { + + return false; + } + for (int i = 0; i < getTableCount(); i++) { + if (!getTable(i).isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProto parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProto) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required string db_name = 1; + private java.lang.Object dbName_ = ""; + /** + * required string db_name = 1; + */ + public boolean hasDbName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string db_name = 1; + */ + public java.lang.String getDbName() { + java.lang.Object ref = dbName_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + dbName_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string db_name = 1; + */ + public com.google.protobuf.ByteString + getDbNameBytes() { + java.lang.Object ref = dbName_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + dbName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string db_name = 1; + */ + public Builder setDbName( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + dbName_ = value; + onChanged(); + return this; + } + /** + * required string db_name = 1; + */ + public Builder clearDbName() { + bitField0_ = (bitField0_ & ~0x00000001); + dbName_ = getDefaultInstance().getDbName(); + onChanged(); + return this; + } + /** + * required string db_name = 1; + */ + public Builder setDbNameBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + dbName_ = value; + onChanged(); + return this; + } + + // repeated .TableProto table = 2; + private java.util.List table_ = + java.util.Collections.emptyList(); + private void ensureTableIsMutable() { + if (!((bitField0_ & 0x00000002) == 0x00000002)) { + table_ = new java.util.ArrayList(table_); + bitField0_ |= 0x00000002; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProtoOrBuilder> tableBuilder_; + + /** + * repeated .TableProto table = 2; + */ + public java.util.List getTableList() { + if (tableBuilder_ == null) { + return java.util.Collections.unmodifiableList(table_); + } else { + return tableBuilder_.getMessageList(); + } + } + /** + * repeated 
.TableProto table = 2; + */ + public int getTableCount() { + if (tableBuilder_ == null) { + return table_.size(); + } else { + return tableBuilder_.getCount(); + } + } + /** + * repeated .TableProto table = 2; + */ + public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto getTable(int index) { + if (tableBuilder_ == null) { + return table_.get(index); + } else { + return tableBuilder_.getMessage(index); + } + } + /** + * repeated .TableProto table = 2; + */ + public Builder setTable( + int index, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto value) { + if (tableBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTableIsMutable(); + table_.set(index, value); + onChanged(); + } else { + tableBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .TableProto table = 2; + */ + public Builder setTable( + int index, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto.Builder builderForValue) { + if (tableBuilder_ == null) { + ensureTableIsMutable(); + table_.set(index, builderForValue.build()); + onChanged(); + } else { + tableBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .TableProto table = 2; + */ + public Builder addTable(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto value) { + if (tableBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTableIsMutable(); + table_.add(value); + onChanged(); + } else { + tableBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .TableProto table = 2; + */ + public Builder addTable( + int index, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto value) { + if (tableBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTableIsMutable(); + table_.add(index, value); + onChanged(); + } else { + tableBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .TableProto table = 2; + */ + public Builder addTable( + org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto.Builder builderForValue) { + if (tableBuilder_ == null) { + ensureTableIsMutable(); + table_.add(builderForValue.build()); + onChanged(); + } else { + tableBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .TableProto table = 2; + */ + public Builder addTable( + int index, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto.Builder builderForValue) { + if (tableBuilder_ == null) { + ensureTableIsMutable(); + table_.add(index, builderForValue.build()); + onChanged(); + } else { + tableBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .TableProto table = 2; + */ + public Builder addAllTable( + java.lang.Iterable values) { + if (tableBuilder_ == null) { + ensureTableIsMutable(); + super.addAll(values, table_); + onChanged(); + } else { + tableBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .TableProto table = 2; + */ + public Builder clearTable() { + if (tableBuilder_ == null) { + table_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + } else { + tableBuilder_.clear(); + } + return this; + } + /** + * repeated .TableProto table = 2; + */ + public Builder removeTable(int index) { + if (tableBuilder_ == null) { + ensureTableIsMutable(); + table_.remove(index); + 
onChanged(); + } else { + tableBuilder_.remove(index); + } + return this; + } + /** + * repeated .TableProto table = 2; + */ + public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto.Builder getTableBuilder( + int index) { + return getTableFieldBuilder().getBuilder(index); + } + /** + * repeated .TableProto table = 2; + */ + public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProtoOrBuilder getTableOrBuilder( + int index) { + if (tableBuilder_ == null) { + return table_.get(index); } else { + return tableBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .TableProto table = 2; + */ + public java.util.List + getTableOrBuilderList() { + if (tableBuilder_ != null) { + return tableBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(table_); + } + } + /** + * repeated .TableProto table = 2; + */ + public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto.Builder addTableBuilder() { + return getTableFieldBuilder().addBuilder( + org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto.getDefaultInstance()); + } + /** + * repeated .TableProto table = 2; + */ + public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto.Builder addTableBuilder( + int index) { + return getTableFieldBuilder().addBuilder( + index, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto.getDefaultInstance()); + } + /** + * repeated .TableProto table = 2; + */ + public java.util.List + getTableBuilderList() { + return getTableFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProtoOrBuilder> + getTableFieldBuilder() { + if (tableBuilder_ == null) { + tableBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProtoOrBuilder>( + table_, + ((bitField0_ & 0x00000002) == 0x00000002), + getParentForChildren(), + isClean()); + table_ = null; + } + return tableBuilder_; + } + + // @@protoc_insertion_point(builder_scope:EvictEntityRequestProto) + } + + static { + defaultInstance = new EvictEntityRequestProto(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:EvictEntityRequestProto) + } + + public interface TableProtoOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required string table_name = 1; + /** + * required string table_name = 1; + */ + boolean hasTableName(); + /** + * required string table_name = 1; + */ + java.lang.String getTableName(); + /** + * required string table_name = 1; + */ + com.google.protobuf.ByteString + getTableNameBytes(); + + // repeated string part_key = 2; + /** + * repeated string part_key = 2; + */ + java.util.List + getPartKeyList(); + /** + * repeated string part_key = 2; + */ + int getPartKeyCount(); + /** + * repeated string part_key = 2; + */ + java.lang.String getPartKey(int index); + /** + * repeated string part_key = 2; + */ + com.google.protobuf.ByteString + getPartKeyBytes(int index); + + // repeated string part_val = 3; + /** + * repeated string part_val = 
3; + */ + java.util.List + getPartValList(); + /** + * repeated string part_val = 3; + */ + int getPartValCount(); + /** + * repeated string part_val = 3; + */ + java.lang.String getPartVal(int index); + /** + * repeated string part_val = 3; + */ + com.google.protobuf.ByteString + getPartValBytes(int index); + } + /** + * Protobuf type {@code TableProto} + */ + public static final class TableProto extends + com.google.protobuf.GeneratedMessage + implements TableProtoOrBuilder { + // Use TableProto.newBuilder() to construct. + private TableProto(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private TableProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final TableProto defaultInstance; + public static TableProto getDefaultInstance() { + return defaultInstance; + } + + public TableProto getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private TableProto( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + tableName_ = input.readBytes(); + break; + } + case 18: { + if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + partKey_ = new com.google.protobuf.LazyStringArrayList(); + mutable_bitField0_ |= 0x00000002; + } + partKey_.add(input.readBytes()); + break; + } + case 26: { + if (!((mutable_bitField0_ & 0x00000004) == 0x00000004)) { + partVal_ = new com.google.protobuf.LazyStringArrayList(); + mutable_bitField0_ |= 0x00000004; + } + partVal_.add(input.readBytes()); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + partKey_ = new com.google.protobuf.UnmodifiableLazyStringList(partKey_); + } + if (((mutable_bitField0_ & 0x00000004) == 0x00000004)) { + partVal_ = new com.google.protobuf.UnmodifiableLazyStringList(partVal_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_TableProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_TableProto_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto.class, 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public TableProto parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new TableProto(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required string table_name = 1; + public static final int TABLE_NAME_FIELD_NUMBER = 1; + private java.lang.Object tableName_; + /** + * required string table_name = 1; + */ + public boolean hasTableName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string table_name = 1; + */ + public java.lang.String getTableName() { + java.lang.Object ref = tableName_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + tableName_ = s; + } + return s; + } + } + /** + * required string table_name = 1; + */ + public com.google.protobuf.ByteString + getTableNameBytes() { + java.lang.Object ref = tableName_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + tableName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // repeated string part_key = 2; + public static final int PART_KEY_FIELD_NUMBER = 2; + private com.google.protobuf.LazyStringList partKey_; + /** + * repeated string part_key = 2; + */ + public java.util.List + getPartKeyList() { + return partKey_; + } + /** + * repeated string part_key = 2; + */ + public int getPartKeyCount() { + return partKey_.size(); + } + /** + * repeated string part_key = 2; + */ + public java.lang.String getPartKey(int index) { + return partKey_.get(index); + } + /** + * repeated string part_key = 2; + */ + public com.google.protobuf.ByteString + getPartKeyBytes(int index) { + return partKey_.getByteString(index); + } + + // repeated string part_val = 3; + public static final int PART_VAL_FIELD_NUMBER = 3; + private com.google.protobuf.LazyStringList partVal_; + /** + * repeated string part_val = 3; + */ + public java.util.List + getPartValList() { + return partVal_; + } + /** + * repeated string part_val = 3; + */ + public int getPartValCount() { + return partVal_.size(); + } + /** + * repeated string part_val = 3; + */ + public java.lang.String getPartVal(int index) { + return partVal_.get(index); + } + /** + * repeated string part_val = 3; + */ + public com.google.protobuf.ByteString + getPartValBytes(int index) { + return partVal_.getByteString(index); + } + + private void initFields() { + tableName_ = ""; + partKey_ = com.google.protobuf.LazyStringArrayList.EMPTY; + partVal_ = com.google.protobuf.LazyStringArrayList.EMPTY; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasTableName()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + 
getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getTableNameBytes()); + } + for (int i = 0; i < partKey_.size(); i++) { + output.writeBytes(2, partKey_.getByteString(i)); + } + for (int i = 0; i < partVal_.size(); i++) { + output.writeBytes(3, partVal_.getByteString(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getTableNameBytes()); + } + { + int dataSize = 0; + for (int i = 0; i < partKey_.size(); i++) { + dataSize += com.google.protobuf.CodedOutputStream + .computeBytesSizeNoTag(partKey_.getByteString(i)); + } + size += dataSize; + size += 1 * getPartKeyList().size(); + } + { + int dataSize = 0; + for (int i = 0; i < partVal_.size(); i++) { + dataSize += com.google.protobuf.CodedOutputStream + .computeBytesSizeNoTag(partVal_.getByteString(i)); + } + size += dataSize; + size += 1 * getPartValList().size(); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto)) { + return super.equals(obj); + } + org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto other = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto) obj; + + boolean result = true; + result = result && (hasTableName() == other.hasTableName()); + if (hasTableName()) { + result = result && getTableName() + .equals(other.getTableName()); + } + result = result && getPartKeyList() + .equals(other.getPartKeyList()); + result = result && getPartValList() + .equals(other.getPartValList()); + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasTableName()) { + hash = (37 * hash) + TABLE_NAME_FIELD_NUMBER; + hash = (53 * hash) + getTableName().hashCode(); + } + if (getPartKeyCount() > 0) { + hash = (37 * hash) + PART_KEY_FIELD_NUMBER; + hash = (53 * hash) + getPartKeyList().hashCode(); + } + if (getPartValCount() > 0) { + hash = (37 * hash) + PART_VAL_FIELD_NUMBER; + hash = (53 * hash) + getPartValList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return 
PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code TableProto} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProtoOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_TableProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_TableProto_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto.class, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto.Builder.class); + } + + // Construct using 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + tableName_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + partKey_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000002); + partVal_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000004); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_TableProto_descriptor; + } + + public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto getDefaultInstanceForType() { + return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto.getDefaultInstance(); + } + + public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto build() { + org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto buildPartial() { + org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto result = new org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.tableName_ = tableName_; + if (((bitField0_ & 0x00000002) == 0x00000002)) { + partKey_ = new com.google.protobuf.UnmodifiableLazyStringList( + partKey_); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.partKey_ = partKey_; + if (((bitField0_ & 0x00000004) == 0x00000004)) { + partVal_ = new com.google.protobuf.UnmodifiableLazyStringList( + partVal_); + bitField0_ = (bitField0_ & ~0x00000004); + } + result.partVal_ = partVal_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto) { + return mergeFrom((org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto other) { + if (other == org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto.getDefaultInstance()) return this; + if (other.hasTableName()) { + bitField0_ |= 0x00000001; + tableName_ = other.tableName_; + onChanged(); + } + if (!other.partKey_.isEmpty()) { + if (partKey_.isEmpty()) { + partKey_ = other.partKey_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensurePartKeyIsMutable(); + partKey_.addAll(other.partKey_); + } + onChanged(); + } + if (!other.partVal_.isEmpty()) { + if (partVal_.isEmpty()) { + partVal_ = 
other.partVal_; + bitField0_ = (bitField0_ & ~0x00000004); + } else { + ensurePartValIsMutable(); + partVal_.addAll(other.partVal_); + } + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasTableName()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required string table_name = 1; + private java.lang.Object tableName_ = ""; + /** + * required string table_name = 1; + */ + public boolean hasTableName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string table_name = 1; + */ + public java.lang.String getTableName() { + java.lang.Object ref = tableName_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + tableName_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string table_name = 1; + */ + public com.google.protobuf.ByteString + getTableNameBytes() { + java.lang.Object ref = tableName_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + tableName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string table_name = 1; + */ + public Builder setTableName( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + tableName_ = value; + onChanged(); + return this; + } + /** + * required string table_name = 1; + */ + public Builder clearTableName() { + bitField0_ = (bitField0_ & ~0x00000001); + tableName_ = getDefaultInstance().getTableName(); + onChanged(); + return this; + } + /** + * required string table_name = 1; + */ + public Builder setTableNameBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + tableName_ = value; + onChanged(); + return this; + } + + // repeated string part_key = 2; + private com.google.protobuf.LazyStringList partKey_ = com.google.protobuf.LazyStringArrayList.EMPTY; + private void ensurePartKeyIsMutable() { + if (!((bitField0_ & 0x00000002) == 0x00000002)) { + partKey_ = new com.google.protobuf.LazyStringArrayList(partKey_); + bitField0_ |= 0x00000002; + } + } + /** + * repeated string part_key = 2; + */ + public java.util.List + getPartKeyList() { + return java.util.Collections.unmodifiableList(partKey_); + } + /** + * repeated string part_key = 2; + */ + public int getPartKeyCount() { + return partKey_.size(); + } + /** + * repeated string part_key = 2; + */ + public java.lang.String getPartKey(int index) { + return partKey_.get(index); + } + /** + * repeated string part_key = 2; + */ + public com.google.protobuf.ByteString + getPartKeyBytes(int index) { + return 
partKey_.getByteString(index); + } + /** + * repeated string part_key = 2; + */ + public Builder setPartKey( + int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensurePartKeyIsMutable(); + partKey_.set(index, value); + onChanged(); + return this; + } + /** + * repeated string part_key = 2; + */ + public Builder addPartKey( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensurePartKeyIsMutable(); + partKey_.add(value); + onChanged(); + return this; + } + /** + * repeated string part_key = 2; + */ + public Builder addAllPartKey( + java.lang.Iterable values) { + ensurePartKeyIsMutable(); + super.addAll(values, partKey_); + onChanged(); + return this; + } + /** + * repeated string part_key = 2; + */ + public Builder clearPartKey() { + partKey_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + /** + * repeated string part_key = 2; + */ + public Builder addPartKeyBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + ensurePartKeyIsMutable(); + partKey_.add(value); + onChanged(); + return this; + } + + // repeated string part_val = 3; + private com.google.protobuf.LazyStringList partVal_ = com.google.protobuf.LazyStringArrayList.EMPTY; + private void ensurePartValIsMutable() { + if (!((bitField0_ & 0x00000004) == 0x00000004)) { + partVal_ = new com.google.protobuf.LazyStringArrayList(partVal_); + bitField0_ |= 0x00000004; + } + } + /** + * repeated string part_val = 3; + */ + public java.util.List + getPartValList() { + return java.util.Collections.unmodifiableList(partVal_); + } + /** + * repeated string part_val = 3; + */ + public int getPartValCount() { + return partVal_.size(); + } + /** + * repeated string part_val = 3; + */ + public java.lang.String getPartVal(int index) { + return partVal_.get(index); + } + /** + * repeated string part_val = 3; + */ + public com.google.protobuf.ByteString + getPartValBytes(int index) { + return partVal_.getByteString(index); + } + /** + * repeated string part_val = 3; + */ + public Builder setPartVal( + int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensurePartValIsMutable(); + partVal_.set(index, value); + onChanged(); + return this; + } + /** + * repeated string part_val = 3; + */ + public Builder addPartVal( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensurePartValIsMutable(); + partVal_.add(value); + onChanged(); + return this; + } + /** + * repeated string part_val = 3; + */ + public Builder addAllPartVal( + java.lang.Iterable values) { + ensurePartValIsMutable(); + super.addAll(values, partVal_); + onChanged(); + return this; + } + /** + * repeated string part_val = 3; + */ + public Builder clearPartVal() { + partVal_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + /** + * repeated string part_val = 3; + */ + public Builder addPartValBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + ensurePartValIsMutable(); + partVal_.add(value); + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:TableProto) + } + + static { + defaultInstance = new TableProto(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:TableProto) + } + + 
public interface EvictEntityResponseProtoOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required bool ack = 1; + /** + * required bool ack = 1; + */ + boolean hasAck(); + /** + * required bool ack = 1; + */ + boolean getAck(); + } + /** + * Protobuf type {@code EvictEntityResponseProto} + */ + public static final class EvictEntityResponseProto extends + com.google.protobuf.GeneratedMessage + implements EvictEntityResponseProtoOrBuilder { + // Use EvictEntityResponseProto.newBuilder() to construct. + private EvictEntityResponseProto(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private EvictEntityResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final EvictEntityResponseProto defaultInstance; + public static EvictEntityResponseProto getDefaultInstance() { + return defaultInstance; + } + + public EvictEntityResponseProto getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private EvictEntityResponseProto( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 8: { + bitField0_ |= 0x00000001; + ack_ = input.readBool(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_EvictEntityResponseProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_EvictEntityResponseProto_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto.class, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public EvictEntityResponseProto parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new EvictEntityResponseProto(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + 
// required bool ack = 1; + public static final int ACK_FIELD_NUMBER = 1; + private boolean ack_; + /** + * required bool ack = 1; + */ + public boolean hasAck() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required bool ack = 1; + */ + public boolean getAck() { + return ack_; + } + + private void initFields() { + ack_ = false; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasAck()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBool(1, ack_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize(1, ack_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto)) { + return super.equals(obj); + } + org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto other = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto) obj; + + boolean result = true; + result = result && (hasAck() == other.hasAck()); + if (hasAck()) { + result = result && (getAck() + == other.getAck()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasAck()) { + hash = (37 * hash) + ACK_FIELD_NUMBER; + hash = (53 * hash) + hashBoolean(getAck()); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code EvictEntityResponseProto} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProtoOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_EvictEntityResponseProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_EvictEntityResponseProto_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto.class, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto.Builder.class); + } + + // Construct using org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto.newBuilder() + private 
Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + ack_ = false; + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_EvictEntityResponseProto_descriptor; + } + + public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto getDefaultInstanceForType() { + return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto.getDefaultInstance(); + } + + public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto build() { + org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto buildPartial() { + org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto result = new org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.ack_ = ack_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto) { + return mergeFrom((org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto other) { + if (other == org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto.getDefaultInstance()) return this; + if (other.hasAck()) { + setAck(other.getAck()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasAck()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required bool ack = 1; + private boolean ack_ 
; + /** + * required bool ack = 1; + */ + public boolean hasAck() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required bool ack = 1; + */ + public boolean getAck() { + return ack_; + } + /** + * required bool ack = 1; + */ + public Builder setAck(boolean value) { + bitField0_ |= 0x00000001; + ack_ = value; + onChanged(); + return this; + } + /** + * required bool ack = 1; + */ + public Builder clearAck() { + bitField0_ = (bitField0_ & ~0x00000001); + ack_ = false; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:EvictEntityResponseProto) + } + + static { + defaultInstance = new EvictEntityResponseProto(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:EvictEntityResponseProto) + } + /** * Protobuf service {@code LlapDaemonProtocol} */ @@ -23185,6 +25423,14 @@ public abstract void setCapacity( org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityRequestProto request, com.google.protobuf.RpcCallback done); + /** + * rpc evictEntity(.EvictEntityRequestProto) returns (.EvictEntityResponseProto); + */ + public abstract void evictEntity( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProto request, + com.google.protobuf.RpcCallback done); + } public static com.google.protobuf.Service newReflectiveService( @@ -23222,6 +25468,14 @@ public void setCapacity( impl.setCapacity(controller, request, done); } + @java.lang.Override + public void evictEntity( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProto request, + com.google.protobuf.RpcCallback done) { + impl.evictEntity(controller, request, done); + } + }; } @@ -23252,6 +25506,8 @@ public void setCapacity( return impl.getDaemonMetrics(controller, (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsRequestProto)request); case 3: return impl.setCapacity(controller, (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityRequestProto)request); + case 4: + return impl.evictEntity(controller, (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProto)request); default: throw new java.lang.AssertionError("Can't get here."); } @@ -23274,6 +25530,8 @@ public void setCapacity( return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsRequestProto.getDefaultInstance(); case 3: return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityRequestProto.getDefaultInstance(); + case 4: + return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProto.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -23296,6 +25554,8 @@ public void setCapacity( return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsResponseProto.getDefaultInstance(); case 3: return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityResponseProto.getDefaultInstance(); + case 4: + return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -23336,6 +25596,14 @@ public abstract void setCapacity( org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityRequestProto request, com.google.protobuf.RpcCallback done); + /** + * rpc 
evictEntity(.EvictEntityRequestProto) returns (.EvictEntityResponseProto); + */ + public abstract void evictEntity( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProto request, + com.google.protobuf.RpcCallback done); + public static final com.google.protobuf.Descriptors.ServiceDescriptor getDescriptor() { @@ -23378,6 +25646,11 @@ public final void callMethod( com.google.protobuf.RpcUtil.specializeCallback( done)); return; + case 4: + this.evictEntity(controller, (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProto)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; default: throw new java.lang.AssertionError("Can't get here."); } @@ -23400,6 +25673,8 @@ public final void callMethod( return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsRequestProto.getDefaultInstance(); case 3: return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityRequestProto.getDefaultInstance(); + case 4: + return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProto.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -23422,6 +25697,8 @@ public final void callMethod( return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsResponseProto.getDefaultInstance(); case 3: return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityResponseProto.getDefaultInstance(); + case 4: + return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -23502,6 +25779,21 @@ public void setCapacity( org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityResponseProto.class, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityResponseProto.getDefaultInstance())); } + + public void evictEntity( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProto request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(4), + controller, + request, + org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto.class, + org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto.getDefaultInstance())); + } } public static BlockingInterface newBlockingStub( @@ -23529,6 +25821,11 @@ public static BlockingInterface newBlockingStub( com.google.protobuf.RpcController controller, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityRequestProto request) throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto evictEntity( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProto request) + throws com.google.protobuf.ServiceException; } private static final class BlockingStub implements BlockingInterface { @@ -23585,6 +25882,18 @@ private BlockingStub(com.google.protobuf.BlockingRpcChannel channel) { 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityResponseProto.getDefaultInstance()); } + + public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto evictEntity( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProto request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto) channel.callBlockingMethod( + getDescriptor().getMethods().get(4), + controller, + request, + org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto.getDefaultInstance()); + } + } // @@protoc_insertion_point(class_scope:LlapManagementProtocol) @@ -23745,6 +26054,21 @@ private BlockingStub(com.google.protobuf.BlockingRpcChannel channel) { private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_SetCapacityResponseProto_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_EvictEntityRequestProto_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_EvictEntityRequestProto_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_TableProto_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_TableProto_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_EvictEntityResponseProto_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_EvictEntityResponseProto_fieldAccessorTable; public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { @@ -23833,31 +26157,38 @@ private BlockingStub(com.google.protobuf.BlockingRpcChannel channel) { "roto\022\032\n\007metrics\030\001 \003(\0132\t.MapEntry\"A\n\027SetC" + "apacityRequestProto\022\023\n\013executorNum\030\001 \001(\005" + "\022\021\n\tqueueSize\030\002 \001(\005\"\032\n\030SetCapacityRespon" + - "seProto*2\n\020SourceStateProto\022\017\n\013S_SUCCEED", - "ED\020\001\022\r\n\tS_RUNNING\020\002*E\n\024SubmissionStatePr" + - "oto\022\014\n\010ACCEPTED\020\001\022\014\n\010REJECTED\020\002\022\021\n\rEVICT" + - "ED_OTHER\020\0032\337\003\n\022LlapDaemonProtocol\022B\n\013reg" + - "isterDag\022\030.RegisterDagRequestProto\032\031.Reg" + - "isterDagResponseProto\022?\n\nsubmitWork\022\027.Su" + - "bmitWorkRequestProto\032\030.SubmitWorkRespons" + - "eProto\022W\n\022sourceStateUpdated\022\037.SourceSta" + - "teUpdatedRequestProto\032 .SourceStateUpdat" + - "edResponseProto\022H\n\rqueryComplete\022\032.Query" + - "CompleteRequestProto\032\033.QueryCompleteResp", - "onseProto\022T\n\021terminateFragment\022\036.Termina" + - "teFragmentRequestProto\032\037.TerminateFragme" + - "ntResponseProto\022K\n\016updateFragment\022\033.Upda" + - "teFragmentRequestProto\032\034.UpdateFragmentR" + - "esponseProto2\265\002\n\026LlapManagementProtocol\022" + - "C\n\022getDelegationToken\022\025.GetTokenRequestP" + - "roto\032\026.GetTokenResponseProto\022?\n\npurgeCac" + - "he\022\027.PurgeCacheRequestProto\032\030.PurgeCache" + - "ResponseProto\022Q\n\020getDaemonMetrics\022\035.GetD" + - "aemonMetricsRequestProto\032\036.GetDaemonMetr", - "icsResponseProto\022B\n\013setCapacity\022\030.SetCap" + - "acityRequestProto\032\031.SetCapacityResponseP" + - "rotoBH\n&org.apache.hadoop.hive.llap.daem" + 
- "on.rpcB\030LlapDaemonProtocolProtos\210\001\001\240\001\001" + "seProto\"F\n\027EvictEntityRequestProto\022\017\n\007db", + "_name\030\001 \002(\t\022\032\n\005table\030\002 \003(\0132\013.TableProto\"" + + "D\n\nTableProto\022\022\n\ntable_name\030\001 \002(\t\022\020\n\010par" + + "t_key\030\002 \003(\t\022\020\n\010part_val\030\003 \003(\t\"\'\n\030EvictEn" + + "tityResponseProto\022\013\n\003ack\030\001 \002(\010*2\n\020Source" + + "StateProto\022\017\n\013S_SUCCEEDED\020\001\022\r\n\tS_RUNNING" + + "\020\002*E\n\024SubmissionStateProto\022\014\n\010ACCEPTED\020\001" + + "\022\014\n\010REJECTED\020\002\022\021\n\rEVICTED_OTHER\020\0032\337\003\n\022Ll" + + "apDaemonProtocol\022B\n\013registerDag\022\030.Regist" + + "erDagRequestProto\032\031.RegisterDagResponseP" + + "roto\022?\n\nsubmitWork\022\027.SubmitWorkRequestPr", + "oto\032\030.SubmitWorkResponseProto\022W\n\022sourceS" + + "tateUpdated\022\037.SourceStateUpdatedRequestP" + + "roto\032 .SourceStateUpdatedResponseProto\022H" + + "\n\rqueryComplete\022\032.QueryCompleteRequestPr" + + "oto\032\033.QueryCompleteResponseProto\022T\n\021term" + + "inateFragment\022\036.TerminateFragmentRequest" + + "Proto\032\037.TerminateFragmentResponseProto\022K" + + "\n\016updateFragment\022\033.UpdateFragmentRequest" + + "Proto\032\034.UpdateFragmentResponseProto2\371\002\n\026" + + "LlapManagementProtocol\022C\n\022getDelegationT", + "oken\022\025.GetTokenRequestProto\032\026.GetTokenRe" + + "sponseProto\022?\n\npurgeCache\022\027.PurgeCacheRe" + + "questProto\032\030.PurgeCacheResponseProto\022Q\n\020" + + "getDaemonMetrics\022\035.GetDaemonMetricsReque" + + "stProto\032\036.GetDaemonMetricsResponseProto\022" + + "B\n\013setCapacity\022\030.SetCapacityRequestProto" + + "\032\031.SetCapacityResponseProto\022B\n\013evictEnti" + + "ty\022\030.EvictEntityRequestProto\032\031.EvictEnti" + + "tyResponseProtoBH\n&org.apache.hadoop.hiv" + + "e.llap.daemon.rpcB\030LlapDaemonProtocolPro", + "tos\210\001\001\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -24050,6 +26381,24 @@ private BlockingStub(com.google.protobuf.BlockingRpcChannel channel) { com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_SetCapacityResponseProto_descriptor, new java.lang.String[] { }); + internal_static_EvictEntityRequestProto_descriptor = + getDescriptor().getMessageTypes().get(31); + internal_static_EvictEntityRequestProto_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_EvictEntityRequestProto_descriptor, + new java.lang.String[] { "DbName", "Table", }); + internal_static_TableProto_descriptor = + getDescriptor().getMessageTypes().get(32); + internal_static_TableProto_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_TableProto_descriptor, + new java.lang.String[] { "TableName", "PartKey", "PartVal", }); + internal_static_EvictEntityResponseProto_descriptor = + getDescriptor().getMessageTypes().get(33); + internal_static_EvictEntityResponseProto_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_EvictEntityResponseProto_descriptor, + new java.lang.String[] { "Ack", }); return null; } }; diff --git a/llap-common/src/java/org/apache/hadoop/hive/llap/impl/LlapManagementProtocolClientImpl.java 
b/llap-common/src/java/org/apache/hadoop/hive/llap/impl/LlapManagementProtocolClientImpl.java index d4b8cce0404c5df19e7671e3f9759e9ef820f2c1..86e3cb0f229c466ebe97a4cddbab2fe00a45cd49 100644 --- a/llap-common/src/java/org/apache/hadoop/hive/llap/impl/LlapManagementProtocolClientImpl.java +++ b/llap-common/src/java/org/apache/hadoop/hive/llap/impl/LlapManagementProtocolClientImpl.java @@ -109,4 +109,15 @@ public GetTokenResponseProto getDelegationToken(RpcController controller, throw new ServiceException(e); } } + + @Override + public LlapDaemonProtocolProtos.EvictEntityResponseProto evictEntity( + RpcController controller, LlapDaemonProtocolProtos.EvictEntityRequestProto request) + throws ServiceException { + try { + return getProxy().evictEntity(null, request); + } catch (IOException e) { + throw new ServiceException(e); + } + } } diff --git a/llap-common/src/protobuf/LlapDaemonProtocol.proto b/llap-common/src/protobuf/LlapDaemonProtocol.proto index ffc3c8175830d328c79d54674e8e8e85221389a1..ee921ecaf30a94869d4c89d3a7c2c825b6c15722 100644 --- a/llap-common/src/protobuf/LlapDaemonProtocol.proto +++ b/llap-common/src/protobuf/LlapDaemonProtocol.proto @@ -231,6 +231,21 @@ message SetCapacityRequestProto { message SetCapacityResponseProto { } +message EvictEntityRequestProto { + required string db_name = 1; + repeated TableProto table = 2; +} + +message TableProto { + required string table_name = 1; + repeated string part_key = 2; + repeated string part_val = 3; +} + +message EvictEntityResponseProto { + required bool ack = 1; +} + service LlapDaemonProtocol { rpc registerDag(RegisterDagRequestProto) returns (RegisterDagResponseProto); rpc submitWork(SubmitWorkRequestProto) returns (SubmitWorkResponseProto); @@ -241,8 +256,9 @@ service LlapDaemonProtocol { } service LlapManagementProtocol { - rpc getDelegationToken(GetTokenRequestProto) returns (GetTokenResponseProto); - rpc purgeCache(PurgeCacheRequestProto) returns (PurgeCacheResponseProto); - rpc getDaemonMetrics(GetDaemonMetricsRequestProto) returns (GetDaemonMetricsResponseProto); - rpc setCapacity(SetCapacityRequestProto) returns (SetCapacityResponseProto); + rpc getDelegationToken (GetTokenRequestProto) returns (GetTokenResponseProto); + rpc purgeCache (PurgeCacheRequestProto) returns (PurgeCacheResponseProto); + rpc getDaemonMetrics (GetDaemonMetricsRequestProto) returns (GetDaemonMetricsResponseProto); + rpc setCapacity (SetCapacityRequestProto) returns (SetCapacityResponseProto); + rpc evictEntity (EvictEntityRequestProto) returns (EvictEntityResponseProto); } diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/CacheContentsTracker.java b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/CacheContentsTracker.java index 733b30c7dda5ca051fb7950b3944620e026e0bc4..91334b2f631f97a6b6cb2a33c4fb45fa15ed320b 100644 --- a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/CacheContentsTracker.java +++ b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/CacheContentsTracker.java @@ -25,6 +25,7 @@ import java.util.concurrent.ConcurrentSkipListMap; import org.apache.hadoop.hive.common.io.CacheTag; +import org.apache.hadoop.hive.llap.ProactiveEviction; import org.apache.hadoop.hive.llap.cache.LowLevelCache.Priority; import static java.util.stream.Collectors.joining; @@ -161,6 +162,10 @@ public long purge() { return realPolicy.purge(); } + @Override + public long evictEntity(ProactiveEviction.Request request) { + return realPolicy.evictEntity(request); + } @Override public long evictSomeBlocks(long memoryToReserve) { 
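For reference, a minimal sketch of the request a client-side caller ends up sending over the new evictEntity RPC, assuming the EvictEntityRequestProto/TableProto builders generated from the .proto changes above; the database, table and partition values ("web", "logs", "ds") and the class name are invented for illustration. Note the encoding convention this patch relies on: part_key lists each partition column once per table, while part_val appends the values of every partition spec in that same key order (the daemon side decodes them with valIx % getPartKeyCount()).

    // Illustrative sketch only: a request equivalent to evicting partitions
    // (ds='2020-01-01') and (ds='2020-01-02') of table web.logs.
    import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProto;
    import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto;

    public class EvictEntityRequestSketch {
      public static EvictEntityRequestProto buildSampleRequest() {
        TableProto table = TableProto.newBuilder()
            .setTableName("logs")
            .addPartKey("ds")             // partition columns, listed once per table
            .addPartVal("2020-01-01")     // values of the first partition spec, in key order
            .addPartVal("2020-01-02")     // values of the second partition spec
            .build();
        return EvictEntityRequestProto.newBuilder()
            .setDbName("web")
            .addTable(table)
            .build();
      }
    }

Such a request would then be handed to LlapManagementProtocolClientImpl#evictEntity as added below, and the daemon answers with an EvictEntityResponseProto whose required ack flag indicates whether the request was accepted.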
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCacheMemoryManager.java b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCacheMemoryManager.java index 689a5d5c2b74509f72c9ebd9a746f38488ea9a76..a53328cccc4380768442163f636ced111450ae15 100644 --- a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCacheMemoryManager.java +++ b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCacheMemoryManager.java @@ -21,6 +21,7 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; +import org.apache.hadoop.hive.llap.ProactiveEviction; import org.apache.hadoop.hive.llap.io.api.impl.LlapIoImpl; import org.apache.hadoop.hive.llap.metrics.LlapDaemonCacheMetrics; @@ -164,6 +165,18 @@ public long purge() { return evicted; } + public long evictEntity(ProactiveEviction.Request request) { + if (evictor == null) return 0; + long evicted = evictor.evictEntity(request); + if (evicted == 0) return 0; + long usedMem = -1; + do { + usedMem = usedMemory.get(); + } while (!usedMemory.compareAndSet(usedMem, usedMem - evicted)); + metrics.incrCacheCapacityUsed(-evicted); + return evicted; + } + @VisibleForTesting public long getCurrentUsedSize() { return usedMemory.get(); diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCachePolicy.java b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCachePolicy.java index aa5ad66314b36bcb21306202b2c739fc54cee619..2fdc2fb94176a9f9d89339a658075767f41baa01 100644 --- a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCachePolicy.java +++ b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCachePolicy.java @@ -18,6 +18,8 @@ package org.apache.hadoop.hive.llap.cache; + +import org.apache.hadoop.hive.llap.ProactiveEviction; import org.apache.hadoop.hive.llap.cache.LowLevelCache.Priority; /** @@ -75,4 +77,6 @@ * @return amount (bytes) of memory evicted. 
*/ long purge(); + + long evictEntity(ProactiveEviction.Request request); } diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelFifoCachePolicy.java b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelFifoCachePolicy.java index bdc6721f640765f1c7e81bb2c49c019f981e4def..fbcdeb6cad2c3118025d2ef2e9c84a813b406b27 100644 --- a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelFifoCachePolicy.java +++ b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelFifoCachePolicy.java @@ -25,6 +25,7 @@ import java.util.concurrent.locks.ReentrantLock; import org.apache.hadoop.hive.llap.LlapUtil; +import org.apache.hadoop.hive.llap.ProactiveEviction; import org.apache.hadoop.hive.llap.cache.LowLevelCache.Priority; import org.apache.hadoop.hive.llap.io.api.impl.LlapIoImpl; @@ -71,6 +72,11 @@ public long purge() { return evicted; } + @Override + public long evictEntity(ProactiveEviction.Request request) { + return 0; + } + @Override public long evictSomeBlocks(long memoryToReserve) { return evictInternal(memoryToReserve, -1); diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelLrfuCachePolicy.java b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelLrfuCachePolicy.java index 2afb899148183af5f1945fcf64ee5cb87d35df55..2bbb5858e9eb323e25df8cbd6c54f8f3aba50fec 100644 --- a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelLrfuCachePolicy.java +++ b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelLrfuCachePolicy.java @@ -25,6 +25,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.llap.LlapUtil; +import org.apache.hadoop.hive.llap.ProactiveEviction; import org.apache.hadoop.hive.llap.cache.LowLevelCache.Priority; import org.apache.hadoop.hive.llap.io.api.impl.LlapIoImpl; import org.apache.hadoop.hive.llap.io.metadata.MetadataCache.LlapMetadataBuffer; @@ -240,6 +241,11 @@ public void setEvictionListener(EvictionListener listener) { this.evictionListener = listener; } + @Override + public long evictEntity(ProactiveEviction.Request request) { + return 0; + } + @Override public long purge() { long evicted = 0; diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapProtocolServerImpl.java b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapProtocolServerImpl.java index e7a4fdd015346d4194159dc47584f9f6d38f03f9..6dd4351e82bee5460169636e2ea3748ca6edb94b 100644 --- a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapProtocolServerImpl.java +++ b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapProtocolServerImpl.java @@ -333,6 +333,23 @@ public GetTokenResponseProto getDelegationToken(RpcController controller, } } + @Override + public LlapDaemonProtocolProtos.EvictEntityResponseProto evictEntity( + RpcController controller, LlapDaemonProtocolProtos.EvictEntityRequestProto request) + throws ServiceException { + LlapDaemonProtocolProtos.EvictEntityResponseProto.Builder responseProtoBuilder = + LlapDaemonProtocolProtos.EvictEntityResponseProto.newBuilder(); + + LlapIo llapIo = LlapProxy.getIo(); + if (llapIo != null) { + llapIo.evictEntity(request); + responseProtoBuilder.setAck(true); + } else { + responseProtoBuilder.setAck(false); + } + return responseProtoBuilder.build(); + } + private boolean determineIfSigningIsRequired(UserGroupInformation callingUser) { switch (isSigningRequiredConfig) { case FALSE: return false; diff --git 
a/llap-server/src/java/org/apache/hadoop/hive/llap/io/api/impl/LlapIoImpl.java b/llap-server/src/java/org/apache/hadoop/hive/llap/io/api/impl/LlapIoImpl.java index fadefa20bc49c7c1e3e62de7c63799cd1d7e293d..544dcbd2616662e6a919a8369bb798e9d208c622 100644 --- a/llap-server/src/java/org/apache/hadoop/hive/llap/io/api/impl/LlapIoImpl.java +++ b/llap-server/src/java/org/apache/hadoop/hive/llap/io/api/impl/LlapIoImpl.java @@ -23,12 +23,14 @@ import java.util.Arrays; import java.util.List; import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.TimeUnit; import javax.management.ObjectName; import org.apache.hadoop.hive.common.io.CacheTag; +import org.apache.hadoop.hive.llap.ProactiveEviction; import org.apache.hadoop.hive.llap.daemon.impl.StatsRecordingThreadPool; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -58,6 +60,7 @@ import org.apache.hadoop.hive.llap.cache.SimpleAllocator; import org.apache.hadoop.hive.llap.cache.SimpleBufferManager; import org.apache.hadoop.hive.llap.cache.LowLevelCache.Priority; +import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos; import org.apache.hadoop.hive.llap.io.api.LlapIo; import org.apache.hadoop.hive.llap.io.decode.ColumnVectorProducer; import org.apache.hadoop.hive.llap.io.decode.GenericColumnVectorProducer; @@ -104,6 +107,7 @@ private final BufferUsageManager bufferManager; private final Configuration daemonConf; private final LowLevelCacheMemoryManager memoryManager; + private final ExecutorService proactiveEvictionExecutor; private List debugDumpComponents = new ArrayList<>(); @@ -221,6 +225,9 @@ public void debugDumpShort(StringBuilder sb) { metadataCache, dataCache, bufferManagerOrc, conf, cacheMetrics, ioMetrics, tracePool); this.genericCvp = isEncodeEnabled ? 
new GenericColumnVectorProducer( serdeCache, bufferManagerGeneric, conf, cacheMetrics, ioMetrics, tracePool) : null; + proactiveEvictionExecutor = Executors.newSingleThreadExecutor( + new ThreadFactoryBuilder().setNameFormat("Proactive-Evictor-%d").setDaemon(true).build()); + LOG.info("LLAP IO initialized"); registerMXBeans(); @@ -245,6 +252,36 @@ public long purge() { return 0; } + public boolean evictEntity(LlapDaemonProtocolProtos.EvictEntityRequestProto protoRequest) { + if (memoryManager == null || + !HiveConf.getBoolVar(daemonConf, ConfVars.LLAP_IO_PROACTIVE_EVICTION_ENABLED)) { + return false; + } + final ProactiveEviction.Request request = ProactiveEviction.Request.Builder.create() + .fromProtoRequest(protoRequest).build(); + Runnable evictionTask = new Runnable() { + @Override + public void run() { + long evictedBytes = memoryManager.evictEntity(request); + StringBuilder sb = new StringBuilder(); + sb.append("Evicted ").append(evictedBytes).append(" bytes from LLAP cache buffers that " + + "belong to table(s): "); + for (String table : request.getEntities().get(request.getSingleDbName()).keySet()) { + sb.append(table).append(" "); + } + sb.append("in DB: ").append(protoRequest.getDbName()); + LOG.info(sb.toString()); + } + }; + + if (HiveConf.getBoolVar(daemonConf, ConfVars.LLAP_IO_PROACTIVE_EVICTION_ASYNC)) { + proactiveEvictionExecutor.submit(evictionTask); + } else { + evictionTask.run(); + } + return true; + } + @Override public InputFormat getInputFormat( InputFormat sourceInputFormat, Deserializer sourceSerDe) { diff --git a/llap-server/src/test/org/apache/hadoop/hive/llap/cache/TestLowLevelCacheImpl.java b/llap-server/src/test/org/apache/hadoop/hive/llap/cache/TestLowLevelCacheImpl.java index 1c2eef2d5f38d60bfb1ba15cad7760bbf7ff03aa..adbffb69381a4581c6eba767653053c9a1282bd3 100644 --- a/llap-server/src/test/org/apache/hadoop/hive/llap/cache/TestLowLevelCacheImpl.java +++ b/llap-server/src/test/org/apache/hadoop/hive/llap/cache/TestLowLevelCacheImpl.java @@ -38,6 +38,7 @@ import org.apache.hadoop.hive.common.io.DataCache.DiskRangeListFactory; import org.apache.hadoop.hive.common.io.DiskRangeList.CreateHelper; import org.apache.hadoop.hive.common.io.encoded.MemoryBuffer; +import org.apache.hadoop.hive.llap.ProactiveEviction; import org.apache.hadoop.hive.llap.cache.LowLevelCache.Priority; import org.apache.hadoop.hive.llap.metrics.LlapDaemonCacheMetrics; import org.apache.hadoop.hive.ql.io.orc.encoded.CacheChunk; @@ -120,6 +121,11 @@ public long purge() { return 0; } + @Override + public long evictEntity(ProactiveEviction.Request request) { + return 0; + } + @Override public void debugDumpShort(StringBuilder sb) { } diff --git a/llap-server/src/test/org/apache/hadoop/hive/llap/cache/TestOrcMetadataCache.java b/llap-server/src/test/org/apache/hadoop/hive/llap/cache/TestOrcMetadataCache.java index e5953e2b952f9ecf8209b114b4130fdbd3d7d292..e210ffe72f10d91b68accd9379e037f0f6cea94b 100644 --- a/llap-server/src/test/org/apache/hadoop/hive/llap/cache/TestOrcMetadataCache.java +++ b/llap-server/src/test/org/apache/hadoop/hive/llap/cache/TestOrcMetadataCache.java @@ -27,6 +27,7 @@ import org.apache.hadoop.hive.common.io.DiskRange; import org.apache.hadoop.hive.common.io.DiskRangeList; +import org.apache.hadoop.hive.llap.ProactiveEviction; import org.apache.hadoop.hive.llap.cache.LowLevelCache.Priority; import org.apache.hadoop.hive.llap.io.metadata.MetadataCache; import org.apache.hadoop.hive.llap.io.metadata.MetadataCache.LlapBufferOrBuffers; @@ -66,6 +67,11 @@ public long purge() { 
return 0; } + @Override + public long evictEntity(ProactiveEviction.Request request) { + return 0; + } + public void verifyEquals(int i) { assertEquals(i, lockCount); assertEquals(i, unlockCount); diff --git a/ql/src/java/org/apache/hadoop/hive/llap/ProactiveEviction.java b/ql/src/java/org/apache/hadoop/hive/llap/ProactiveEviction.java new file mode 100644 index 0000000000000000000000000000000000000000..260be33c8e15a36680ee47ff8b06429ad533f57f --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/llap/ProactiveEviction.java @@ -0,0 +1,290 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.llap; + +import java.util.Collection; +import java.util.HashMap; +import java.util.HashSet; +import java.util.LinkedHashMap; +import java.util.LinkedHashSet; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.Callable; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; +import javax.net.SocketFactory; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos; +import org.apache.hadoop.hive.llap.impl.LlapManagementProtocolClientImpl; +import org.apache.hadoop.hive.llap.registry.LlapServiceInstance; +import org.apache.hadoop.hive.llap.registry.impl.LlapRegistryService; +import org.apache.hadoop.io.retry.RetryPolicies; +import org.apache.hadoop.io.retry.RetryPolicy; +import org.apache.hadoop.net.NetUtils; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Through this class the caller (typically HS2) can request eviction of buffers from LLAP cache by specifying a DB, + * table or partition name/(value). Request sending is implemented here. + */ +public class ProactiveEviction { + + /** + * Trigger LLAP cache eviction of buffers related to entities residing in request parameter. + * @param conf + * @param request + */ + public static void evict(Configuration conf, Request request) { + if (!HiveConf.getBoolVar(conf, HiveConf.ConfVars.LLAP_IO_PROACTIVE_EVICTION_ENABLED)) { + return; + } + + try { + LlapRegistryService llapRegistryService = LlapRegistryService.getClient(conf); + Collection instances = llapRegistryService.getInstances().getAll(); + if (instances.size() == 0) { + // Not in LLAP mode. + return; + } + ExecutorService executorService = Executors.newCachedThreadPool(); + for (LlapServiceInstance instance : instances) { + Task task = new Task(conf, instance, request); + executorService.submit(task); + } + + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + /** + * The executable task to carry out request sending. 
+   */
+  public static class Task implements Callable<Void> {
+    public final Logger LOG = LoggerFactory.getLogger(Task.class);
+    private final Request request;
+    private Configuration conf;
+    private LlapServiceInstance instance;
+    private SocketFactory socketFactory;
+    private RetryPolicy retryPolicy;
+
+    Task(Configuration conf, LlapServiceInstance llapServiceInstance, Request request) {
+      this.conf = conf;
+      this.instance = llapServiceInstance;
+      this.socketFactory = NetUtils.getDefaultSocketFactory(conf);
+      // not making this configurable, best effort
+      this.retryPolicy = RetryPolicies.retryUpToMaximumTimeWithFixedSleep(
+          10000, 2000L, TimeUnit.MILLISECONDS);
+      this.request = request;
+    }
+
+    @Override
+    public Void call() {
+      if (request.isEmpty()) {
+        throw new IllegalArgumentException("No entities set to trigger eviction on.");
+      }
+      try {
+        LlapManagementProtocolClientImpl client = new LlapManagementProtocolClientImpl(conf, instance.getHost(),
+            instance.getManagementPort(), retryPolicy, socketFactory);
+
+        List<LlapDaemonProtocolProtos.EvictEntityRequestProto> protoRequests = request.toProtoRequests();
+
+        for (LlapDaemonProtocolProtos.EvictEntityRequestProto protoRequest : protoRequests) {
+          client.evictEntity(null, protoRequest);
+        }
+        LOG.info("Requested proactive eviction.");
+      } catch (Exception e) {
+        LOG.warn("Exception while requesting proactive eviction.", e);
+      }
+      return null;
+    }
+  }
+
+  /**
+   * Holds information on entities: DB name(s), table name(s), partitions.
+   */
+  public static class Request {
+
+    private final Map<String, Map<String, Set<LinkedHashMap<String, String>>>> entities;
+
+    private Request(Map<String, Map<String, Set<LinkedHashMap<String, String>>>> entities) {
+      this.entities = entities;
+    }
+
+    public Map<String, Map<String, Set<LinkedHashMap<String, String>>>> getEntities() {
+      return entities;
+    }
+
+    public boolean isEmpty() {
+      return entities.isEmpty();
+    }
+
+    public String getSingleDbName() {
+      if (entities.size() == 1) {
+        return entities.keySet().stream().findFirst().get();
+      }
+      return null;
+    }
+
+    /**
+     * Translate to Protobuf request.
+     * @return list of protobuf requests, one per DB.
+     */
+    public List<LlapDaemonProtocolProtos.EvictEntityRequestProto> toProtoRequests() {
+
+      List<LlapDaemonProtocolProtos.EvictEntityRequestProto> protoRequests = new LinkedList<>();
+
+      for (String dbName : entities.keySet()) {
+        Map<String, Set<LinkedHashMap<String, String>>> tables = entities.get(dbName);
+        LlapDaemonProtocolProtos.EvictEntityRequestProto.Builder requestBuilder =
+            LlapDaemonProtocolProtos.EvictEntityRequestProto.newBuilder();
+        LlapDaemonProtocolProtos.TableProto.Builder tableBuilder = null;
+
+        requestBuilder.setDbName(dbName.toLowerCase());
+        for (String tableName : tables.keySet()) {
+          tableBuilder = LlapDaemonProtocolProtos.TableProto.newBuilder();
+          tableBuilder.setTableName(tableName.toLowerCase());
+
+          Set<LinkedHashMap<String, String>> partitions = tables.get(tableName);
+          LinkedHashSet<String> partitionKeys = null;
+
+          for (LinkedHashMap<String, String> partitionSpec : partitions) {
+            if (partitionKeys == null) {
+              // For a given table the set of partition columns (keys) should not change.
+              partitionKeys = new LinkedHashSet<>(partitionSpec.keySet());
+              tableBuilder.addAllPartKey(partitionKeys);
+            }
+            for (String partKey : tableBuilder.getPartKeyList()) {
+              tableBuilder.addPartVal(partitionSpec.get(partKey));
+            }
+          }
+          requestBuilder.addTable(tableBuilder.build());
+        }
+        protoRequests.add(requestBuilder.build());
+      }
+      return protoRequests;
+    }
+
+    /**
+     * Lets callers specify what entities are requested to be evicted, and builds a Request instance accordingly.
+     */
+    public static class Builder {
+
+      private final Map<String, Map<String, Set<LinkedHashMap<String, String>>>> entities;
+
+      private Builder() {
+        this.entities = new HashMap<>();
+      }
+
+      public static Builder create() {
+        return new Builder();
+      }
+
+      public Builder addPartitionOfATable(String db, String tableName, LinkedHashMap<String, String> partSpec) {
+        ensureDb(db);
+        ensureTable(db, tableName);
+        entities.get(db).get(tableName).add(partSpec);
+        return this;
+      }
+
+      public Builder addDb(String db) {
+        ensureDb(db);
+        return this;
+      }
+
+      public Builder addTable(String db, String table) {
+        ensureDb(db);
+        ensureTable(db, table);
+        return this;
+      }
+
+      public Request build() {
+        return new Request(entities);
+      }
+
+      private void ensureDb(String dbName) {
+        Map<String, Set<LinkedHashMap<String, String>>> tables = entities.get(dbName);
+        if (tables == null) {
+          tables = new HashMap<>();
+          entities.put(dbName, tables);
+        }
+      }
+
+      private void ensureTable(String dbName, String tableName) {
+        ensureDb(dbName);
+        Map<String, Set<LinkedHashMap<String, String>>> tables = entities.get(dbName);
+
+        Set<LinkedHashMap<String, String>> partitions = tables.get(tableName);
+        if (partitions == null) {
+          partitions = new HashSet<>();
+          tables.put(tableName, partitions);
+        }
+      }
+
+      /**
+       * Translate from Protobuf request.
+       * @param protoRequest the protobuf request to translate.
+       * @return this builder, populated with the entities of the protobuf request.
+       */
+      public Builder fromProtoRequest(LlapDaemonProtocolProtos.EvictEntityRequestProto protoRequest) {
+        entities.clear();
+        String dbName = protoRequest.getDbName().toLowerCase();
+
+        Map<String, Set<LinkedHashMap<String, String>>> entitiesInDb = new HashMap<>();
+        List<LlapDaemonProtocolProtos.TableProto> tables = protoRequest.getTableList();
+
+        if (tables != null && !tables.isEmpty()) {
+          for (LlapDaemonProtocolProtos.TableProto table : tables) {
+            String dbAndTableName =
+                (new StringBuilder().append(dbName).append('.').append(table.getTableName())).toString().toLowerCase();
+
+            if (table.getPartValCount() == 0) {
+              entitiesInDb.put(dbAndTableName, null);
+              continue;
+            }
+            Set<LinkedHashMap<String, String>> partitions = new HashSet<>();
+            LinkedHashMap<String, String> partDesc = new LinkedHashMap<>();
+
+            for (int valIx = 0; valIx < table.getPartValCount(); ++valIx) {
+              int keyIx = valIx % table.getPartKeyCount();
+
+              partDesc.put(table.getPartKey(keyIx).toLowerCase(), table.getPartVal(valIx));
+
+              if (keyIx == table.getPartKeyCount() - 1) {
+                partitions.add(partDesc);
+                partDesc = new LinkedHashMap<>();
+              }
+            }
+
+            entitiesInDb.put(dbAndTableName, partitions);
+          }
+        }
+        entities.put(dbName, entitiesInDb);
+        return this;
+      }
+    }
+  }
+
+}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/database/drop/DropDatabaseOperation.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/database/drop/DropDatabaseOperation.java
index 4ce89a983a54fc145c065df193ab8ff92e73c732..04698e8bf64ac221fc7a4fa363d141dd9f0dd536 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/database/drop/DropDatabaseOperation.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/database/drop/DropDatabaseOperation.java
@@ -18,6 +18,8 @@
 package org.apache.hadoop.hive.ql.ddl.database.drop;
 
+import org.apache.hadoop.hive.llap.LlapDaemonInfo;
+import org.apache.hadoop.hive.llap.ProactiveEviction;
 import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
 import org.apache.hadoop.hive.ql.ErrorMsg;
@@ -48,6 +50,13 @@ public int execute() throws HiveException {
     }
 
     context.getDb().dropDatabase(dbName, true, desc.getIfExists(), desc.isCasdade());
+
+    if (LlapDaemonInfo.INSTANCE.isLlapIo()) {
+      ProactiveEviction.Request.Builder llapEvictRequestBuilder =
+          ProactiveEviction.Request.Builder.create();
+      llapEvictRequestBuilder.addDb(dbName);
+      ProactiveEviction.evict(context.getConf(), llapEvictRequestBuilder.build());
+    }
 
     // Unregister the functions as well
     if (desc.isCasdade()) {
       FunctionRegistry.unregisterPermanentFunctions(dbName);
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/drop/DropTableOperation.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/drop/DropTableOperation.java
index f4d1a35d72d1fc3fb0810e829796c399bbd8727f..9f48a67b05e856d6db7fa41a2908f7c827fcbecf 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/drop/DropTableOperation.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/drop/DropTableOperation.java
@@ -18,6 +18,9 @@
 package org.apache.hadoop.hive.ql.ddl.table.drop;
 
+import org.apache.hadoop.hive.common.TableName;
+import org.apache.hadoop.hive.llap.LlapDaemonInfo;
+import org.apache.hadoop.hive.llap.ProactiveEviction;
 import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
 import org.apache.hadoop.hive.ql.ddl.DDLOperationContext;
 import org.apache.hadoop.hive.ql.ddl.DDLUtils;
@@ -28,6 +31,7 @@
 import org.apache.hadoop.hive.ql.metadata.Partition;
 import org.apache.hadoop.hive.ql.metadata.PartitionIterable;
 import org.apache.hadoop.hive.ql.metadata.Table;
+import org.apache.hadoop.hive.ql.parse.HiveTableName;
 import org.apache.hadoop.hive.ql.parse.ReplicationSpec;
 
 import com.google.common.collect.Iterables;
@@ -102,6 +106,14 @@ public int execute() throws HiveException {
     context.getDb().dropTable(desc.getTableName(), desc.isPurge());
     DDLUtils.addIfAbsentByName(new WriteEntity(table, WriteEntity.WriteType.DDL_NO_LOCK), context);
 
+    if (LlapDaemonInfo.INSTANCE.isLlapIo()) {
+      TableName tableName = HiveTableName.of(table);
+      ProactiveEviction.Request.Builder llapEvictRequestBuilder =
+          ProactiveEviction.Request.Builder.create();
+      llapEvictRequestBuilder.addTable(tableName.getDb(), tableName.getTable());
+      ProactiveEviction.evict(context.getConf(), llapEvictRequestBuilder.build());
+    }
+
     return 0;
   }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/drop/AlterTableDropPartitionOperation.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/drop/AlterTableDropPartitionOperation.java
index 9d186db7af92fa5e4bbfa8579158044379c5c820..3a0e2fc77ee1e8b09d224bd1ce334f141a1846c2 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/drop/AlterTableDropPartitionOperation.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/drop/AlterTableDropPartitionOperation.java
@@ -23,6 +23,8 @@
 import org.apache.commons.lang3.tuple.Pair;
 import org.apache.hadoop.hive.common.TableName;
+import org.apache.hadoop.hive.llap.LlapDaemonInfo;
+import org.apache.hadoop.hive.llap.ProactiveEviction;
 import org.apache.hadoop.hive.metastore.PartitionDropOptions;
 import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
 import org.apache.hadoop.hive.ql.ddl.DDLOperation;
@@ -117,10 +119,26 @@ private void dropPartitions() throws HiveException {
     PartitionDropOptions options =
         PartitionDropOptions.instance().deleteData(true).ifExists(true).purgeData(desc.getIfPurge());
     List<Partition> droppedPartitions = context.getDb().dropPartitions(tablenName.getDb(), tablenName.getTable(),
         partitionExpressions, options);
+
+    ProactiveEviction.Request.Builder llapEvictRequestBuilder =
+        LlapDaemonInfo.INSTANCE.isLlapIo() ?
+            ProactiveEviction.Request.Builder.create() : null;
+
     for (Partition partition : droppedPartitions) {
       context.getConsole().printInfo("Dropped the partition " + partition.getName());
       // We have already locked the table, don't lock the partitions.
       DDLUtils.addIfAbsentByName(new WriteEntity(partition, WriteEntity.WriteType.DDL_NO_LOCK), context);
+
+      if (llapEvictRequestBuilder != null) {
+        llapEvictRequestBuilder.addPartitionOfATable(tablenName.getDb(), tablenName.getTable(), partition.getSpec());
+      }
     }
+
+    if (llapEvictRequestBuilder != null) {
+      ProactiveEviction.evict(context.getConf(), llapEvictRequestBuilder.build());
+    }
   }
 }
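As a usage illustration of the builder API introduced in ProactiveEviction above, here is a sketch of what the drop-partition path effectively does once LLAP IO is in play; the database, table and partition values and the class name are invented, and error handling is omitted. ProactiveEviction.evict() is a no-op when ConfVars.LLAP_IO_PROACTIVE_EVICTION_ENABLED is false or when no LLAP daemons are registered.

    import java.util.LinkedHashMap;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hive.llap.ProactiveEviction;

    public class ProactiveEvictionUsageSketch {
      // Illustrative sketch only: mirrors what the DDL drop operations above do after the metastore drop.
      public static void requestEvictionForDroppedPartition(Configuration conf) {
        LinkedHashMap<String, String> partSpec = new LinkedHashMap<>();
        partSpec.put("ds", "2020-01-01"); // partition spec of the dropped partition

        ProactiveEviction.Request.Builder builder = ProactiveEviction.Request.Builder.create();
        builder.addPartitionOfATable("web", "logs", partSpec);

        // Fans the request out to every registered LLAP daemon on a best-effort basis.
        ProactiveEviction.evict(conf, builder.build());
      }
    }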