diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java index 5888eaa..7a99c3d 100644 --- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java +++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java @@ -2811,7 +2811,8 @@ private static void populateLlapDaemonVarsSet(Set llapDaemonVarsSetLocal HIVE_SERVER2_XSRF_FILTER_ENABLED("hive.server2.xsrf.filter.enabled",false, "If enabled, HiveServer2 will block any requests made to it over http " + "if an X-XSRF-HEADER header is not present"), - HIVE_SECURITY_COMMAND_WHITELIST("hive.security.command.whitelist", "set,reset,dfs,add,list,delete,reload,compile", + HIVE_SECURITY_COMMAND_WHITELIST("hive.security.command.whitelist", + "set,reset,dfs,add,list,delete,reload,compile,llap", "Comma separated list of non-SQL Hive commands users are authorized to execute"), HIVE_SERVER2_JOB_CREDENTIAL_PROVIDER_PATH("hive.server2.job.credential.provider.path", "", "If set, this configuration property should provide a comma-separated list of URLs that indicates the type and " + diff --git a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/authorization/TestJdbcWithSQLAuthorization.java b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/authorization/TestJdbcWithSQLAuthorization.java index 9e021ea..661de17 100644 --- a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/authorization/TestJdbcWithSQLAuthorization.java +++ b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/authorization/TestJdbcWithSQLAuthorization.java @@ -18,6 +18,8 @@ package org.apache.hive.jdbc.authorization; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; @@ -159,6 +161,52 @@ public void testAllowedCommands() throws Exception { } @Test + public void testNoAuthZLlapClusterInfo() throws Exception { + // using different code blocks so that jdbc variables are not accidentally re-used + // between the actions. A different connection/statement object should be used for each action. + { + Connection hs2Conn = getConnection("user1"); + boolean caughtException = false; + Statement stmt = hs2Conn.createStatement(); + try { + stmt.execute("set hive.llap.daemon.service.hosts=@localhost"); + stmt.execute("llap cluster -info"); + } catch (SQLException e) { + caughtException = true; + } finally { + stmt.close(); + hs2Conn.close(); + } + assertFalse(caughtException); + } + } + + + @Test + public void testAuthZFailureLlapCachePurge() throws Exception { + // using different code blocks so that jdbc variables are not accidentally re-used + // between the actions. A different connection/statement object should be used for each action. 
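+ // Purging the LLAP cache requires ADMIN privilege (operation LLAP_CACHE), so running it as user1 is expected to fail authorization.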
+ { + Connection hs2Conn = getConnection("user1"); + boolean caughtException = false; + Statement stmt = hs2Conn.createStatement(); + try { + stmt.execute("llap cache -purge"); + } catch (SQLException e) { + caughtException = true; + String msg = "Error while processing statement: Permission denied: Principal [name=user1, type=USER] " + + "does not have following privileges for operation LLAP_CACHE [[ADMIN PRIVILEGE] on Object [" + + "type=COMMAND_PARAMS, name=[-purge]]]"; + assertEquals(msg, e.getMessage()); + } finally { + stmt.close(); + hs2Conn.close(); + } + assertTrue("Exception expected ", caughtException); + } + } + + @Test public void testBlackListedUdfUsage() throws Exception { // create tables as user1 diff --git a/llap-client/src/java/org/apache/hadoop/hive/llap/io/api/LlapIo.java b/llap-client/src/java/org/apache/hadoop/hive/llap/io/api/LlapIo.java index 6e6f5b9..e5c4a00 100644 --- a/llap-client/src/java/org/apache/hadoop/hive/llap/io/api/LlapIo.java +++ b/llap-client/src/java/org/apache/hadoop/hive/llap/io/api/LlapIo.java @@ -27,5 +27,11 @@ InputFormat sourceInputFormat, Deserializer serde); void close(); String getMemoryInfo(); + + /** + * purge is best effort and will just release the buffers that are unlocked (refCount == 0). This is typically + * called when the system is idle. + */ + long purge(); void initCacheOnlyInputFormat(InputFormat inputFormat); } diff --git a/llap-common/src/gen/protobuf/gen-java/org/apache/hadoop/hive/llap/daemon/rpc/LlapDaemonProtocolProtos.java b/llap-common/src/gen/protobuf/gen-java/org/apache/hadoop/hive/llap/daemon/rpc/LlapDaemonProtocolProtos.java index 4753812..8fecc1e 100644 --- a/llap-common/src/gen/protobuf/gen-java/org/apache/hadoop/hive/llap/daemon/rpc/LlapDaemonProtocolProtos.java +++ b/llap-common/src/gen/protobuf/gen-java/org/apache/hadoop/hive/llap/daemon/rpc/LlapDaemonProtocolProtos.java @@ -18129,6 +18129,778 @@ public Builder clearToken() { // @@protoc_insertion_point(class_scope:LlapOutputSocketInitMessage) } + public interface PurgeCacheRequestProtoOrBuilder + extends com.google.protobuf.MessageOrBuilder { + } + /** + * Protobuf type {@code PurgeCacheRequestProto} + */ + public static final class PurgeCacheRequestProto extends + com.google.protobuf.GeneratedMessage + implements PurgeCacheRequestProtoOrBuilder { + // Use PurgeCacheRequestProto.newBuilder() to construct. 
+ private PurgeCacheRequestProto(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private PurgeCacheRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final PurgeCacheRequestProto defaultInstance; + public static PurgeCacheRequestProto getDefaultInstance() { + return defaultInstance; + } + + public PurgeCacheRequestProto getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private PurgeCacheRequestProto( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_PurgeCacheRequestProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_PurgeCacheRequestProto_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheRequestProto.class, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheRequestProto.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public PurgeCacheRequestProto parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new PurgeCacheRequestProto(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private void initFields() { + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + 
return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheRequestProto)) { + return super.equals(obj); + } + org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheRequestProto other = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheRequestProto) obj; + + boolean result = true; + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheRequestProto parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheRequestProto parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheRequestProto parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheRequestProto parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheRequestProto parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheRequestProto parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheRequestProto parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheRequestProto parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheRequestProto parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheRequestProto parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheRequestProto prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code PurgeCacheRequestProto} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheRequestProtoOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_PurgeCacheRequestProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_PurgeCacheRequestProto_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheRequestProto.class, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheRequestProto.Builder.class); + } + + // Construct using org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheRequestProto.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_PurgeCacheRequestProto_descriptor; + } + + public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheRequestProto getDefaultInstanceForType() { + return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheRequestProto.getDefaultInstance(); + } + + public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheRequestProto build() { + org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheRequestProto result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheRequestProto buildPartial() { + org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheRequestProto result = new 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheRequestProto(this); + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheRequestProto) { + return mergeFrom((org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheRequestProto)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheRequestProto other) { + if (other == org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheRequestProto.getDefaultInstance()) return this; + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheRequestProto parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheRequestProto) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + // @@protoc_insertion_point(builder_scope:PurgeCacheRequestProto) + } + + static { + defaultInstance = new PurgeCacheRequestProto(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:PurgeCacheRequestProto) + } + + public interface PurgeCacheResponseProtoOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // optional int64 purged_memory_bytes = 1; + /** + * optional int64 purged_memory_bytes = 1; + */ + boolean hasPurgedMemoryBytes(); + /** + * optional int64 purged_memory_bytes = 1; + */ + long getPurgedMemoryBytes(); + } + /** + * Protobuf type {@code PurgeCacheResponseProto} + */ + public static final class PurgeCacheResponseProto extends + com.google.protobuf.GeneratedMessage + implements PurgeCacheResponseProtoOrBuilder { + // Use PurgeCacheResponseProto.newBuilder() to construct. 
+ private PurgeCacheResponseProto(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private PurgeCacheResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final PurgeCacheResponseProto defaultInstance; + public static PurgeCacheResponseProto getDefaultInstance() { + return defaultInstance; + } + + public PurgeCacheResponseProto getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private PurgeCacheResponseProto( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 8: { + bitField0_ |= 0x00000001; + purgedMemoryBytes_ = input.readInt64(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_PurgeCacheResponseProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_PurgeCacheResponseProto_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheResponseProto.class, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheResponseProto.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public PurgeCacheResponseProto parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new PurgeCacheResponseProto(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // optional int64 purged_memory_bytes = 1; + public static final int PURGED_MEMORY_BYTES_FIELD_NUMBER = 1; + private long purgedMemoryBytes_; + /** + * optional int64 purged_memory_bytes = 1; + */ + public boolean hasPurgedMemoryBytes() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional int64 purged_memory_bytes = 1; + */ + public long getPurgedMemoryBytes() { + return purgedMemoryBytes_; + } + + private void initFields() { + purgedMemoryBytes_ = 0L; + } + private byte 
memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeInt64(1, purgedMemoryBytes_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeInt64Size(1, purgedMemoryBytes_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheResponseProto)) { + return super.equals(obj); + } + org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheResponseProto other = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheResponseProto) obj; + + boolean result = true; + result = result && (hasPurgedMemoryBytes() == other.hasPurgedMemoryBytes()); + if (hasPurgedMemoryBytes()) { + result = result && (getPurgedMemoryBytes() + == other.getPurgedMemoryBytes()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasPurgedMemoryBytes()) { + hash = (37 * hash) + PURGED_MEMORY_BYTES_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getPurgedMemoryBytes()); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheResponseProto parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheResponseProto parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheResponseProto parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheResponseProto parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheResponseProto parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheResponseProto parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheResponseProto parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheResponseProto parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheResponseProto parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheResponseProto parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheResponseProto prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code PurgeCacheResponseProto} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheResponseProtoOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_PurgeCacheResponseProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_PurgeCacheResponseProto_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheResponseProto.class, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheResponseProto.Builder.class); + } + + // Construct using org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheResponseProto.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + 
private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + purgedMemoryBytes_ = 0L; + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_PurgeCacheResponseProto_descriptor; + } + + public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheResponseProto getDefaultInstanceForType() { + return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheResponseProto.getDefaultInstance(); + } + + public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheResponseProto build() { + org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheResponseProto result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheResponseProto buildPartial() { + org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheResponseProto result = new org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheResponseProto(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.purgedMemoryBytes_ = purgedMemoryBytes_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheResponseProto) { + return mergeFrom((org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheResponseProto)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheResponseProto other) { + if (other == org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheResponseProto.getDefaultInstance()) return this; + if (other.hasPurgedMemoryBytes()) { + setPurgedMemoryBytes(other.getPurgedMemoryBytes()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheResponseProto parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheResponseProto) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // optional int64 purged_memory_bytes = 1; + private long purgedMemoryBytes_ ; + /** + * optional int64 purged_memory_bytes = 1; + */ + public boolean hasPurgedMemoryBytes() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional int64 purged_memory_bytes = 1; + */ + public long getPurgedMemoryBytes() { + return 
purgedMemoryBytes_; + } + /** + * optional int64 purged_memory_bytes = 1; + */ + public Builder setPurgedMemoryBytes(long value) { + bitField0_ |= 0x00000001; + purgedMemoryBytes_ = value; + onChanged(); + return this; + } + /** + * optional int64 purged_memory_bytes = 1; + */ + public Builder clearPurgedMemoryBytes() { + bitField0_ = (bitField0_ & ~0x00000001); + purgedMemoryBytes_ = 0L; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:PurgeCacheResponseProto) + } + + static { + defaultInstance = new PurgeCacheResponseProto(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:PurgeCacheResponseProto) + } + /** * Protobuf service {@code LlapDaemonProtocol} */ @@ -18661,6 +19433,14 @@ public abstract void getDelegationToken( org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetTokenRequestProto request, com.google.protobuf.RpcCallback done); + /** + * rpc purgeCache(.PurgeCacheRequestProto) returns (.PurgeCacheResponseProto); + */ + public abstract void purgeCache( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheRequestProto request, + com.google.protobuf.RpcCallback done); + } public static com.google.protobuf.Service newReflectiveService( @@ -18674,6 +19454,14 @@ public void getDelegationToken( impl.getDelegationToken(controller, request, done); } + @java.lang.Override + public void purgeCache( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheRequestProto request, + com.google.protobuf.RpcCallback done) { + impl.purgeCache(controller, request, done); + } + }; } @@ -18698,6 +19486,8 @@ public void getDelegationToken( switch(method.getIndex()) { case 0: return impl.getDelegationToken(controller, (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetTokenRequestProto)request); + case 1: + return impl.purgeCache(controller, (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheRequestProto)request); default: throw new java.lang.AssertionError("Can't get here."); } @@ -18714,6 +19504,8 @@ public void getDelegationToken( switch(method.getIndex()) { case 0: return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetTokenRequestProto.getDefaultInstance(); + case 1: + return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheRequestProto.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -18730,6 +19522,8 @@ public void getDelegationToken( switch(method.getIndex()) { case 0: return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetTokenResponseProto.getDefaultInstance(); + case 1: + return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheResponseProto.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -18746,6 +19540,14 @@ public abstract void getDelegationToken( org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetTokenRequestProto request, com.google.protobuf.RpcCallback done); + /** + * rpc purgeCache(.PurgeCacheRequestProto) returns (.PurgeCacheResponseProto); + */ + public abstract void purgeCache( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheRequestProto request, + com.google.protobuf.RpcCallback done); + public static final com.google.protobuf.Descriptors.ServiceDescriptor 
getDescriptor() { @@ -18773,6 +19575,11 @@ public final void callMethod( com.google.protobuf.RpcUtil.specializeCallback( done)); return; + case 1: + this.purgeCache(controller, (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheRequestProto)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; default: throw new java.lang.AssertionError("Can't get here."); } @@ -18789,6 +19596,8 @@ public final void callMethod( switch(method.getIndex()) { case 0: return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetTokenRequestProto.getDefaultInstance(); + case 1: + return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheRequestProto.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -18805,6 +19614,8 @@ public final void callMethod( switch(method.getIndex()) { case 0: return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetTokenResponseProto.getDefaultInstance(); + case 1: + return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheResponseProto.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -18840,6 +19651,21 @@ public void getDelegationToken( org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetTokenResponseProto.class, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetTokenResponseProto.getDefaultInstance())); } + + public void purgeCache( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheRequestProto request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(1), + controller, + request, + org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheResponseProto.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheResponseProto.class, + org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheResponseProto.getDefaultInstance())); + } } public static BlockingInterface newBlockingStub( @@ -18852,6 +19678,11 @@ public static BlockingInterface newBlockingStub( com.google.protobuf.RpcController controller, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetTokenRequestProto request) throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheResponseProto purgeCache( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheRequestProto request) + throws com.google.protobuf.ServiceException; } private static final class BlockingStub implements BlockingInterface { @@ -18872,6 +19703,18 @@ private BlockingStub(com.google.protobuf.BlockingRpcChannel channel) { org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetTokenResponseProto.getDefaultInstance()); } + + public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheResponseProto purgeCache( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheRequestProto request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheResponseProto) channel.callBlockingMethod( + getDescriptor().getMethods().get(1), + controller, + request, + 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheResponseProto.getDefaultInstance()); + } + } // @@protoc_insertion_point(class_scope:LlapManagementProtocol) @@ -18987,6 +19830,16 @@ private BlockingStub(com.google.protobuf.BlockingRpcChannel channel) { private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_LlapOutputSocketInitMessage_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_PurgeCacheRequestProto_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_PurgeCacheRequestProto_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_PurgeCacheResponseProto_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_PurgeCacheResponseProto_fieldAccessorTable; public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { @@ -19063,25 +19916,29 @@ private BlockingStub(com.google.protobuf.BlockingRpcChannel channel) { " \001(\010\"&\n\024GetTokenRequestProto\022\016\n\006app_id\030\001" + " \001(\t\"&\n\025GetTokenResponseProto\022\r\n\005token\030\001" + " \001(\014\"A\n\033LlapOutputSocketInitMessage\022\023\n\013f" + - "ragment_id\030\001 \002(\t\022\r\n\005token\030\002 \001(\014*2\n\020Sourc" + - "eStateProto\022\017\n\013S_SUCCEEDED\020\001\022\r\n\tS_RUNNIN" + - "G\020\002*E\n\024SubmissionStateProto\022\014\n\010ACCEPTED\020", - "\001\022\014\n\010REJECTED\020\002\022\021\n\rEVICTED_OTHER\020\0032\233\003\n\022L" + - "lapDaemonProtocol\022?\n\nsubmitWork\022\027.Submit" + - "WorkRequestProto\032\030.SubmitWorkResponsePro" + - "to\022W\n\022sourceStateUpdated\022\037.SourceStateUp" + - "datedRequestProto\032 .SourceStateUpdatedRe" + - "sponseProto\022H\n\rqueryComplete\022\032.QueryComp" + - "leteRequestProto\032\033.QueryCompleteResponse" + - "Proto\022T\n\021terminateFragment\022\036.TerminateFr" + - "agmentRequestProto\032\037.TerminateFragmentRe" + - "sponseProto\022K\n\016updateFragment\022\033.UpdateFr", - "agmentRequestProto\032\034.UpdateFragmentRespo" + - "nseProto2]\n\026LlapManagementProtocol\022C\n\022ge" + - "tDelegationToken\022\025.GetTokenRequestProto\032" + - "\026.GetTokenResponseProtoBH\n&org.apache.ha" + - "doop.hive.llap.daemon.rpcB\030LlapDaemonPro" + - "tocolProtos\210\001\001\240\001\001" + "ragment_id\030\001 \002(\t\022\r\n\005token\030\002 \001(\014\"\030\n\026Purge" + + "CacheRequestProto\"6\n\027PurgeCacheResponseP" + + "roto\022\033\n\023purged_memory_bytes\030\001 \001(\003*2\n\020Sou", + "rceStateProto\022\017\n\013S_SUCCEEDED\020\001\022\r\n\tS_RUNN" + + "ING\020\002*E\n\024SubmissionStateProto\022\014\n\010ACCEPTE" + + "D\020\001\022\014\n\010REJECTED\020\002\022\021\n\rEVICTED_OTHER\020\0032\233\003\n" + + "\022LlapDaemonProtocol\022?\n\nsubmitWork\022\027.Subm" + + "itWorkRequestProto\032\030.SubmitWorkResponseP" + + "roto\022W\n\022sourceStateUpdated\022\037.SourceState" + + "UpdatedRequestProto\032 .SourceStateUpdated" + + "ResponseProto\022H\n\rqueryComplete\022\032.QueryCo" + + "mpleteRequestProto\032\033.QueryCompleteRespon" + + "seProto\022T\n\021terminateFragment\022\036.Terminate", + "FragmentRequestProto\032\037.TerminateFragment" + + "ResponseProto\022K\n\016updateFragment\022\033.Update" + + "FragmentRequestProto\032\034.UpdateFragmentRes" + + "ponseProto2\236\001\n\026LlapManagementProtocol\022C\n" + + "\022getDelegationToken\022\025.GetTokenRequestPro" + + 
"to\032\026.GetTokenResponseProto\022?\n\npurgeCache" + + "\022\027.PurgeCacheRequestProto\032\030.PurgeCacheRe" + + "sponseProtoBH\n&org.apache.hadoop.hive.ll" + + "ap.daemon.rpcB\030LlapDaemonProtocolProtos\210" + + "\001\001\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -19220,6 +20077,18 @@ private BlockingStub(com.google.protobuf.BlockingRpcChannel channel) { com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_LlapOutputSocketInitMessage_descriptor, new java.lang.String[] { "FragmentId", "Token", }); + internal_static_PurgeCacheRequestProto_descriptor = + getDescriptor().getMessageTypes().get(22); + internal_static_PurgeCacheRequestProto_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_PurgeCacheRequestProto_descriptor, + new java.lang.String[] { }); + internal_static_PurgeCacheResponseProto_descriptor = + getDescriptor().getMessageTypes().get(23); + internal_static_PurgeCacheResponseProto_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_PurgeCacheResponseProto_descriptor, + new java.lang.String[] { "PurgedMemoryBytes", }); return null; } }; diff --git a/llap-common/src/java/org/apache/hadoop/hive/llap/impl/LlapManagementProtocolClientImpl.java b/llap-common/src/java/org/apache/hadoop/hive/llap/impl/LlapManagementProtocolClientImpl.java index af760b1..2caae82 100644 --- a/llap-common/src/java/org/apache/hadoop/hive/llap/impl/LlapManagementProtocolClientImpl.java +++ b/llap-common/src/java/org/apache/hadoop/hive/llap/impl/LlapManagementProtocolClientImpl.java @@ -22,6 +22,7 @@ import com.google.protobuf.RpcController; import com.google.protobuf.ServiceException; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos; import org.apache.hadoop.io.retry.RetryPolicy; import org.apache.hadoop.ipc.ProtobufRpcEngine; import org.apache.hadoop.ipc.ProtocolProxy; @@ -78,4 +79,14 @@ public GetTokenResponseProto getDelegationToken(RpcController controller, throw new ServiceException(e); } } + + @Override + public LlapDaemonProtocolProtos.PurgeCacheResponseProto purgeCache(final RpcController controller, + final LlapDaemonProtocolProtos.PurgeCacheRequestProto request) throws ServiceException { + try { + return getProxy().purgeCache(null, request); + } catch (IOException e) { + throw new ServiceException(e); + } + } } diff --git a/llap-common/src/java/org/apache/hadoop/hive/llap/protocol/LlapManagementProtocolPB.java b/llap-common/src/java/org/apache/hadoop/hive/llap/protocol/LlapManagementProtocolPB.java index ff215d4..717f45d 100644 --- a/llap-common/src/java/org/apache/hadoop/hive/llap/protocol/LlapManagementProtocolPB.java +++ b/llap-common/src/java/org/apache/hadoop/hive/llap/protocol/LlapManagementProtocolPB.java @@ -19,6 +19,8 @@ import org.apache.hadoop.ipc.ProtocolInfo; import org.apache.hadoop.security.KerberosInfo; +import com.google.protobuf.ServiceException; + @ProtocolInfo(protocolName = "org.apache.hadoop.hive.llap.protocol.LlapManagementProtocolPB", protocolVersion = 1) @KerberosInfo(serverPrincipal = HiveConf.HIVE_LLAP_DAEMON_SERVICE_PRINCIPAL_NAME) @InterfaceAudience.Private diff --git a/llap-common/src/protobuf/LlapDaemonProtocol.proto b/llap-common/src/protobuf/LlapDaemonProtocol.proto index 12beca5..d70dd41 100644 --- 
a/llap-common/src/protobuf/LlapDaemonProtocol.proto +++ b/llap-common/src/protobuf/LlapDaemonProtocol.proto @@ -196,6 +196,13 @@ message LlapOutputSocketInitMessage { optional bytes token = 2; } +message PurgeCacheRequestProto { +} + +message PurgeCacheResponseProto { + optional int64 purged_memory_bytes = 1; +} + service LlapDaemonProtocol { rpc submitWork(SubmitWorkRequestProto) returns (SubmitWorkResponseProto); rpc sourceStateUpdated(SourceStateUpdatedRequestProto) returns (SourceStateUpdatedResponseProto); @@ -206,4 +213,5 @@ service LlapDaemonProtocol { service LlapManagementProtocol { rpc getDelegationToken(GetTokenRequestProto) returns (GetTokenResponseProto); + rpc purgeCache(PurgeCacheRequestProto) returns (PurgeCacheResponseProto); } diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/CacheContentsTracker.java b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/CacheContentsTracker.java index 4fbaac1..6a361fa 100644 --- a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/CacheContentsTracker.java +++ b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/CacheContentsTracker.java @@ -183,6 +183,11 @@ public void setParentDebugDumper(LlapOomDebugDump dumper) { realPolicy.setParentDebugDumper(dumper); } + @Override + public long purge() { + return realPolicy.purge(); + } + @Override public long evictSomeBlocks(long memoryToReserve) { diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCachePolicy.java b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCachePolicy.java index 2cd70b9..3323636 100644 --- a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCachePolicy.java +++ b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCachePolicy.java @@ -27,4 +27,5 @@ long evictSomeBlocks(long memoryToReserve); void setEvictionListener(EvictionListener listener); void setParentDebugDumper(LlapOomDebugDump dumper); + long purge(); } diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelFifoCachePolicy.java b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelFifoCachePolicy.java index 50a2411..f7f80a8 100644 --- a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelFifoCachePolicy.java +++ b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelFifoCachePolicy.java @@ -24,6 +24,7 @@ import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; +import org.apache.hadoop.hive.llap.LlapUtil; import org.apache.hadoop.hive.llap.cache.LowLevelCache.Priority; import org.apache.hadoop.hive.llap.io.api.impl.LlapIoImpl; @@ -70,6 +71,13 @@ public void setParentDebugDumper(LlapOomDebugDump dumper) { } @Override + public long purge() { + long evicted = evictSomeBlocks(Long.MAX_VALUE); + LlapIoImpl.LOG.info("PURGE: evicted {} from FIFO policy", LlapUtil.humanReadableByteCount(evicted)); + return evicted; + } + + @Override public long evictSomeBlocks(long memoryToReserve) { return evictInternal(memoryToReserve, -1); } diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelLrfuCachePolicy.java b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelLrfuCachePolicy.java index b42f761..7787cb4 100644 --- a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelLrfuCachePolicy.java +++ b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelLrfuCachePolicy.java @@ -24,6 +24,7 @@ import org.apache.commons.lang.StringUtils; import org.apache.hadoop.conf.Configuration; import 
org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.llap.LlapUtil; import org.apache.hadoop.hive.llap.cache.LowLevelCache.Priority; import org.apache.hadoop.hive.llap.io.api.impl.LlapIoImpl; @@ -177,6 +178,13 @@ public void setParentDebugDumper(LlapOomDebugDump dumper) { this.parentDebugDump = dumper; } + @Override + public long purge() { + long evicted = evictSomeBlocks(Long.MAX_VALUE); + LlapIoImpl.LOG.info("PURGE: evicted {} from LRFU policy", LlapUtil.humanReadableByteCount(evicted)); + return evicted; + } + @Override public long evictSomeBlocks(long memoryToReserve) { diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapProtocolServerImpl.java b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapProtocolServerImpl.java index 81785f0..d856b25 100644 --- a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapProtocolServerImpl.java +++ b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapProtocolServerImpl.java @@ -25,6 +25,9 @@ import com.google.protobuf.ByteString; import com.google.protobuf.RpcController; import com.google.protobuf.ServiceException; + +import org.apache.hadoop.hive.llap.io.api.LlapIo; +import org.apache.hadoop.hive.llap.io.api.LlapProxy; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.classification.InterfaceAudience; @@ -62,6 +65,7 @@ implements LlapProtocolBlockingPB, LlapManagementProtocolPB { private static final Logger LOG = LoggerFactory.getLogger(LlapProtocolServerImpl.class); + private enum TokenRequiresSigning { TRUE, FALSE, EXCEPT_OWNER } @@ -272,6 +276,20 @@ public GetTokenResponseProto getDelegationToken(RpcController controller, return response; } + @Override + public LlapDaemonProtocolProtos.PurgeCacheResponseProto purgeCache(final RpcController controller, + final LlapDaemonProtocolProtos.PurgeCacheRequestProto request) throws ServiceException { + LlapDaemonProtocolProtos.PurgeCacheResponseProto.Builder responseProtoBuilder = LlapDaemonProtocolProtos + .PurgeCacheResponseProto.newBuilder(); + LlapIo llapIo = LlapProxy.getIo(); + if (llapIo != null) { + responseProtoBuilder.setPurgedMemoryBytes(llapIo.purge()); + } else { + responseProtoBuilder.setPurgedMemoryBytes(0); + } + return responseProtoBuilder.build(); + } + private boolean determineIfSigningIsRequired(UserGroupInformation callingUser) { switch (isSigningRequiredConfig) { case FALSE: return false; diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/io/api/impl/LlapIoImpl.java b/llap-server/src/java/org/apache/hadoop/hive/llap/io/api/impl/LlapIoImpl.java index e5bc3c2..747b399 100644 --- a/llap-server/src/java/org/apache/hadoop/hive/llap/io/api/impl/LlapIoImpl.java +++ b/llap-server/src/java/org/apache/hadoop/hive/llap/io/api/impl/LlapIoImpl.java @@ -86,7 +86,6 @@ public static final Logger ORC_LOGGER = LoggerFactory.getLogger("LlapIoOrc"); public static final Logger CACHE_LOGGER = LoggerFactory.getLogger("LlapIoCache"); public static final Logger LOCKING_LOGGER = LoggerFactory.getLogger("LlapIoLocking"); - private static final String MODE_CACHE = "cache"; // TODO: later, we may have a map @@ -101,6 +100,7 @@ private final LowLevelCache dataCache; private final BufferUsageManager bufferManager; private final Configuration daemonConf; + private LowLevelCachePolicy cachePolicy; private LlapIoImpl(Configuration conf) throws IOException { this.daemonConf = conf; @@ -139,11 +139,13 @@ private LlapIoImpl(Configuration conf) throws IOException { boolean useLrfu = 
HiveConf.getBoolVar(conf, HiveConf.ConfVars.LLAP_USE_LRFU); long totalMemorySize = HiveConf.getSizeVar(conf, ConfVars.LLAP_IO_MEMORY_MAX_SIZE); int minAllocSize = (int)HiveConf.getSizeVar(conf, ConfVars.LLAP_ALLOCATOR_MIN_ALLOC); - LowLevelCachePolicy cachePolicy = useLrfu ? new LowLevelLrfuCachePolicy( + LowLevelCachePolicy cp = useLrfu ? new LowLevelLrfuCachePolicy( minAllocSize, totalMemorySize, conf) : new LowLevelFifoCachePolicy(); boolean trackUsage = HiveConf.getBoolVar(conf, HiveConf.ConfVars.LLAP_TRACK_CACHE_USAGE); if (trackUsage) { - cachePolicy = new CacheContentsTracker(cachePolicy); + this.cachePolicy = new CacheContentsTracker(cp); + } else { + this.cachePolicy = cp; } // Allocator uses memory manager to request memory, so create the manager next. LowLevelCacheMemoryManager memManager = new LowLevelCacheMemoryManager( @@ -212,6 +214,14 @@ public String getMemoryInfo() { } @Override + public long purge() { + if (cachePolicy != null) { + return cachePolicy.purge(); + } + return 0; + } + + @Override public InputFormat getInputFormat( InputFormat sourceInputFormat, Deserializer sourceSerDe) { ColumnVectorProducer cvp = genericCvp; diff --git a/llap-server/src/test/org/apache/hadoop/hive/llap/cache/TestLowLevelCacheImpl.java b/llap-server/src/test/org/apache/hadoop/hive/llap/cache/TestLowLevelCacheImpl.java index 2c87bc2..b19cdcf 100644 --- a/llap-server/src/test/org/apache/hadoop/hive/llap/cache/TestLowLevelCacheImpl.java +++ b/llap-server/src/test/org/apache/hadoop/hive/llap/cache/TestLowLevelCacheImpl.java @@ -120,6 +120,11 @@ public void setParentDebugDumper(LlapOomDebugDump dumper) { } @Override + public long purge() { + return 0; + } + + @Override public void debugDumpShort(StringBuilder sb) { } } diff --git a/llap-server/src/test/org/apache/hadoop/hive/llap/cache/TestOrcMetadataCache.java b/llap-server/src/test/org/apache/hadoop/hive/llap/cache/TestOrcMetadataCache.java index f7ebff2..58c918c 100644 --- a/llap-server/src/test/org/apache/hadoop/hive/llap/cache/TestOrcMetadataCache.java +++ b/llap-server/src/test/org/apache/hadoop/hive/llap/cache/TestOrcMetadataCache.java @@ -63,6 +63,11 @@ public String debugDumpForOom() { public void setParentDebugDumper(LlapOomDebugDump dumper) { } + @Override + public long purge() { + return 0; + } + public void verifyEquals(int i) { assertEquals(i, lockCount); assertEquals(i, unlockCount); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/processors/CommandProcessorFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/processors/CommandProcessorFactory.java index 74a34b3..3d47991 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/processors/CommandProcessorFactory.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/processors/CommandProcessorFactory.java @@ -83,6 +83,10 @@ public static CommandProcessor getForHiveCommandInternal(String[] cmd, HiveConf return new AddResourceProcessor(); case LIST: return new ListResourceProcessor(); + case LLAP_CLUSTER: + return new LlapClusterResourceProcessor(); + case LLAP_CACHE: + return new LlapCacheResourceProcessor(); case DELETE: return new DeleteResourceProcessor(); case COMPILE: diff --git a/ql/src/java/org/apache/hadoop/hive/ql/processors/HiveCommand.java b/ql/src/java/org/apache/hadoop/hive/ql/processors/HiveCommand.java index c45563d..d3e46f6 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/processors/HiveCommand.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/processors/HiveCommand.java @@ -32,6 +32,8 @@ CRYPTO(true), ADD(), LIST(), + LLAP_CLUSTER(), + LLAP_CACHE(), RELOAD(), DELETE(), 
   COMPILE();
@@ -77,6 +79,10 @@ public static HiveCommand find(String[] command, boolean findOnlyForTesting) {
       return null;
     } else if(command.length > 1 && "set".equalsIgnoreCase(command[0]) && "autocommit".equalsIgnoreCase(command[1])) {
       return null;//don't want set autocommit true|false to get mixed with set hive.foo.bar...
+    } else if (command.length > 1 && "llap".equalsIgnoreCase(command[0]) && "cluster".equalsIgnoreCase(command[1])) {
+      return LLAP_CLUSTER;
+    } else if (command.length > 1 && "llap".equalsIgnoreCase(command[0]) && "cache".equalsIgnoreCase(command[1])) {
+      return LLAP_CACHE;
     } else if (COMMANDS.contains(cmd)) {
       HiveCommand hiveCommand = HiveCommand.valueOf(cmd);
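For readers following the control flow: HiveCommand.find() now keys off the first two tokens, so an "llap ..." command resolves to one of the new enum values and CommandProcessorFactory hands it to the matching processor, provided the command word "llap" is allowed by hive.security.command.whitelist. The sketch below is for illustration only and is not part of the patch; the class and method names are the ones changed above, while the main() scaffolding and the printed expectations are assumptions about default configuration.

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.processors.CommandProcessor;
import org.apache.hadoop.hive.ql.processors.CommandProcessorFactory;
import org.apache.hadoop.hive.ql.processors.HiveCommand;

public class LlapCommandRoutingSketch {
  public static void main(String[] args) throws Exception {
    // "llap cluster ..." and "llap cache ..." are recognized by their first two tokens.
    HiveCommand cmd = HiveCommand.find(new String[] {"llap", "cache", "-purge"}, false);
    System.out.println(cmd); // expected: LLAP_CACHE

    // The factory then returns the new processor; "llap" must be whitelisted
    // via hive.security.command.whitelist for this call to succeed.
    CommandProcessor processor = CommandProcessorFactory.getForHiveCommand(
        new String[] {"llap", "cache", "-purge"}, new HiveConf());
    System.out.println(processor.getClass().getSimpleName()); // expected: LlapCacheResourceProcessor
  }
}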
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/processors/LlapCacheResourceProcessor.java b/ql/src/java/org/apache/hadoop/hive/ql/processors/LlapCacheResourceProcessor.java
new file mode 100644
index 0000000..218f901
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/processors/LlapCacheResourceProcessor.java
@@ -0,0 +1,191 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.processors;
+
+import static org.apache.hadoop.hive.serde.serdeConstants.SERIALIZATION_NULL_FORMAT;
+import static org.apache.hadoop.hive.serde2.MetadataTypedColumnsetSerDe.defaultNullString;
+
+import java.io.PrintWriter;
+import java.io.StringWriter;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.List;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+
+import javax.net.SocketFactory;
+
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.CommandLineParser;
+import org.apache.commons.cli.GnuParser;
+import org.apache.commons.cli.HelpFormatter;
+import org.apache.commons.cli.Options;
+import org.apache.commons.cli.ParseException;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.conf.VariableSubstitution;
+import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos;
+import org.apache.hadoop.hive.llap.impl.LlapManagementProtocolClientImpl;
+import org.apache.hadoop.hive.llap.registry.LlapServiceInstance;
+import org.apache.hadoop.hive.llap.registry.impl.LlapRegistryService;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.Schema;
+import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveOperationType;
+import org.apache.hadoop.hive.ql.session.SessionState;
+import org.apache.hadoop.io.retry.RetryPolicies;
+import org.apache.hadoop.io.retry.RetryPolicy;
+import org.apache.hadoop.net.NetUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.base.Joiner;
+
+public class LlapCacheResourceProcessor implements CommandProcessor {
+  public static final Logger LOG = LoggerFactory.getLogger(LlapCacheResourceProcessor.class);
+  private Options CACHE_OPTIONS = new Options();
+  private HelpFormatter helpFormatter = new HelpFormatter();
+
+  LlapCacheResourceProcessor() {
+    CACHE_OPTIONS.addOption("purge", "purge", false, "Purge LLAP IO cache");
+  }
+
+  private CommandProcessorResponse returnErrorResponse(final String errmsg) {
+    return new CommandProcessorResponse(1, "LLAP Cache Processor Helper Failed:" + errmsg, null);
+  }
+
+  @Override
+  public CommandProcessorResponse run(String command) {
+    SessionState ss = SessionState.get();
+    command = new VariableSubstitution(() -> SessionState.get().getHiveVariables()).substitute(ss.getConf(), command);
+    String[] tokens = command.split("\\s+");
+    if (tokens.length < 1) {
+      return returnErrorResponse("Command arguments are empty.");
+    }
+    String params[] = Arrays.copyOfRange(tokens, 1, tokens.length);
+    try {
+      return llapCacheCommandHandler(ss, params);
+    } catch (Exception e) {
+      return returnErrorResponse(e.getMessage());
+    }
+  }
+
+  private CommandProcessorResponse llapCacheCommandHandler(final SessionState ss,
+      final String[] params) throws ParseException {
+    CommandLine args = parseCommandArgs(CACHE_OPTIONS, params);
+    boolean purge = args.hasOption("purge");
+    if (purge) {
+      CommandProcessorResponse authErrResp =
+          CommandUtil.authorizeCommand(ss, HiveOperationType.LLAP_CACHE, Arrays.asList(params));
+      if (authErrResp != null) {
+        // there was an authorization issue
+        return authErrResp;
+      }
+      try {
+        LlapRegistryService llapRegistryService = LlapRegistryService.getClient(ss.getConf());
+        llapCachePurge(ss, llapRegistryService);
+        return createProcessorSuccessResponse();
+      } catch (Exception e) {
+        LOG.error("Error while purging LLAP IO Cache. err: ", e);
+        return returnErrorResponse("Error while purging LLAP IO Cache. err: " + e.getMessage());
+      }
+    } else {
+      String usage = getUsageAsString();
+      return returnErrorResponse("Unsupported sub-command option. " + usage);
+    }
+  }
+
+  private CommandProcessorResponse createProcessorSuccessResponse() {
+    return new CommandProcessorResponse(0, null, null, getSchema());
+  }
+
+  private Schema getSchema() {
+    Schema sch = new Schema();
+    sch.addToFieldSchemas(new FieldSchema("hostName", "string", ""));
+    sch.addToFieldSchemas(new FieldSchema("purgedMemoryBytes", "string", ""));
+    sch.putToProperties(SERIALIZATION_NULL_FORMAT, defaultNullString);
+    return sch;
+  }
+
+  private void llapCachePurge(final SessionState ss, final LlapRegistryService llapRegistryService) throws Exception {
+    ExecutorService executorService = Executors.newCachedThreadPool();
+    List<Future<Long>> futures = new ArrayList<>();
+    Collection<LlapServiceInstance> instances = llapRegistryService.getInstances().getAll();
+    for (LlapServiceInstance instance : instances) {
+      futures.add(executorService.submit(new PurgeCallable(ss.getConf(), instance)));
+    }
+
+    int i = 0;
+    for (LlapServiceInstance instance : instances) {
+      Future<Long> future = futures.get(i);
+      ss.out.println(Joiner.on("\t").join(instance.getHost(), future.get()));
+      i++;
+    }
+  }
+
+  private static class PurgeCallable implements Callable<Long> {
+    public static final Logger LOG = LoggerFactory.getLogger(PurgeCallable.class);
+    private Configuration conf;
+    private LlapServiceInstance instance;
+    private SocketFactory socketFactory;
+    private RetryPolicy retryPolicy;
+
+    PurgeCallable(Configuration conf, LlapServiceInstance llapServiceInstance) {
+      this.conf = conf;
+      this.instance = llapServiceInstance;
+      this.socketFactory = NetUtils.getDefaultSocketFactory(conf);
+      //not making this configurable, best effort
+      this.retryPolicy = RetryPolicies.retryUpToMaximumTimeWithFixedSleep(
+          10000, 2000L, TimeUnit.MILLISECONDS);
+    }
+
+    @Override
+    public Long call() {
+      try {
+        LlapManagementProtocolClientImpl client = new LlapManagementProtocolClientImpl(conf, instance.getHost(),
+            instance.getManagementPort(), retryPolicy, socketFactory);
+        LlapDaemonProtocolProtos.PurgeCacheResponseProto resp = client.purgeCache(null, LlapDaemonProtocolProtos
+            .PurgeCacheRequestProto.newBuilder().build());
+        return resp.getPurgedMemoryBytes();
+      } catch (Exception e) {
+        LOG.warn("Exception while purging cache.", e);
+        return 0L;
+      }
+    }
+  }
+
+  private String getUsageAsString() {
+    StringWriter out = new StringWriter();
+    PrintWriter pw = new PrintWriter(out);
+    helpFormatter.printUsage(pw, helpFormatter.getWidth(), "llap cache", CACHE_OPTIONS);
+    pw.flush();
+    return out.toString();
+  }
+
+  private CommandLine parseCommandArgs(final Options opts, String[] args) throws ParseException {
+    CommandLineParser parser = new GnuParser();
+    return parser.parse(opts, args);
+  }
+
+  @Override
+  public void close() {
+  }
+}
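A note on the fan-out above: llapCachePurge() contacts every registered daemon in parallel and prints one tab-joined row (host, purged bytes) per daemon, while PurgeCallable deliberately swallows per-daemon failures and reports 0 bytes so a single unreachable node does not fail the whole command. The retry behaviour is the stock Hadoop policy; the sketch below is illustrative only and not part of the patch, with the numeric values mirroring the constructor above and everything else assumed.

import java.util.concurrent.TimeUnit;

import org.apache.hadoop.io.retry.RetryPolicies;
import org.apache.hadoop.io.retry.RetryPolicy;

public class PurgeRetrySketch {
  public static void main(String[] args) {
    // Same policy as PurgeCallable: retry with a fixed 2 second sleep between
    // attempts, giving up once roughly 10 seconds (10000 ms) of retrying have elapsed.
    RetryPolicy policy = RetryPolicies.retryUpToMaximumTimeWithFixedSleep(
        10000, 2000L, TimeUnit.MILLISECONDS);
    System.out.println(policy);
  }
}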
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/processors/LlapClusterResourceProcessor.java b/ql/src/java/org/apache/hadoop/hive/ql/processors/LlapClusterResourceProcessor.java
new file mode 100644
index 0000000..0238727
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/processors/LlapClusterResourceProcessor.java
@@ -0,0 +1,134 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.processors;
+
+import static org.apache.hadoop.hive.serde.serdeConstants.SERIALIZATION_NULL_FORMAT;
+import static org.apache.hadoop.hive.serde2.MetadataTypedColumnsetSerDe.defaultNullString;
+
+import java.io.PrintWriter;
+import java.io.StringWriter;
+import java.util.Arrays;
+
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.CommandLineParser;
+import org.apache.commons.cli.GnuParser;
+import org.apache.commons.cli.HelpFormatter;
+import org.apache.commons.cli.Options;
+import org.apache.commons.cli.ParseException;
+import org.apache.hadoop.hive.conf.VariableSubstitution;
+import org.apache.hadoop.hive.llap.registry.LlapServiceInstance;
+import org.apache.hadoop.hive.llap.registry.impl.LlapRegistryService;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.Schema;
+import org.apache.hadoop.hive.ql.session.SessionState;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.base.Joiner;
+
+public class LlapClusterResourceProcessor implements CommandProcessor {
+  public static final Logger LOG = LoggerFactory.getLogger(LlapClusterResourceProcessor.class);
+  private Options CLUSTER_OPTIONS = new Options();
+  private HelpFormatter helpFormatter = new HelpFormatter();
+
+  LlapClusterResourceProcessor() {
+    CLUSTER_OPTIONS.addOption("info", "info", false, "Information about LLAP cluster");
+  }
+
+  private CommandProcessorResponse returnErrorResponse(final String errmsg) {
+    return new CommandProcessorResponse(1, "LLAP Cluster Processor Helper Failed:" + errmsg, null);
+  }
+
+  @Override
+  public CommandProcessorResponse run(String command) {
+    SessionState ss = SessionState.get();
+    command = new VariableSubstitution(() -> SessionState.get().getHiveVariables()).substitute(ss.getConf(), command);
+    String[] tokens = command.split("\\s+");
+    if (tokens.length < 1) {
+      return returnErrorResponse("Command arguments are empty.");
+    }
+
+    String params[] = Arrays.copyOfRange(tokens, 1, tokens.length);
+    try {
+      return llapClusterCommandHandler(ss, params);
+    } catch (Exception e) {
+      return returnErrorResponse(e.getMessage());
+    }
+  }
+
+  private CommandProcessorResponse llapClusterCommandHandler(final SessionState ss,
+      final String[] params) throws ParseException {
+    CommandLine args = parseCommandArgs(CLUSTER_OPTIONS, params);
+
+    // no auth check for "LLAP CLUSTER INFO"
+    boolean hasInfo = args.hasOption("info");
+    if (hasInfo) {
+      try {
+        LlapRegistryService llapRegistryService = LlapRegistryService.getClient(ss.getConf());
+        String appId = llapRegistryService.getApplicationId() == null ? "null" :
+            llapRegistryService.getApplicationId().toString();
+        for (LlapServiceInstance instance : llapRegistryService.getInstances().getAll()) {
+          ss.out.println(Joiner.on("\t").join(appId, instance.getWorkerIdentity(), instance.getHost(),
+              instance.getRpcPort(), instance.getResource().getMemory() * 1024L * 1024L,
+              instance.getResource().getVirtualCores()));
+        }
+        return createProcessorSuccessResponse();
+      } catch (Exception e) {
+        LOG.error("Unable to list LLAP instances. err: ", e);
+        return returnErrorResponse("Unable to list LLAP instances. err: " + e.getMessage());
+      }
+    } else {
+      String usage = getUsageAsString();
+      return returnErrorResponse("Unsupported sub-command option. " + usage);
+    }
+  }
+
+  private CommandProcessorResponse createProcessorSuccessResponse() {
+    return new CommandProcessorResponse(0, null, null, getSchema());
+  }
+
+  private Schema getSchema() {
+    Schema sch = new Schema();
+    sch.addToFieldSchemas(new FieldSchema("applicationId", "string", ""));
+    sch.addToFieldSchemas(new FieldSchema("workerIdentity", "string", ""));
+    sch.addToFieldSchemas(new FieldSchema("hostname", "string", ""));
+    sch.addToFieldSchemas(new FieldSchema("rpcPort", "string", ""));
+    sch.addToFieldSchemas(new FieldSchema("memory", "string", ""));
+    sch.addToFieldSchemas(new FieldSchema("vcores", "string", ""));
+    sch.putToProperties(SERIALIZATION_NULL_FORMAT, defaultNullString);
+    return sch;
+  }
+
+  private String getUsageAsString() {
+    StringWriter out = new StringWriter();
+    PrintWriter pw = new PrintWriter(out);
+    helpFormatter.printUsage(pw, helpFormatter.getWidth(), "llap cluster", CLUSTER_OPTIONS);
+    pw.flush();
+    return out.toString();
+  }
+
+  private CommandLine parseCommandArgs(final Options opts, String[] args) throws ParseException {
+    CommandLineParser parser = new GnuParser();
+    return parser.parse(opts, args);
+  }
+
+  @Override
+  public void close() {
+  }
+}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveOperationType.java b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveOperationType.java
index 5d6905a..0374d37 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveOperationType.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveOperationType.java
@@ -147,6 +147,8 @@ CREATE_MAPPING,
   ALTER_MAPPING,
   DROP_MAPPING,
+  LLAP_CLUSTER,
+  LLAP_CACHE,
   // ==== Hive command operation types starts here ==== //
   SET,
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/Operation2Privilege.java b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/Operation2Privilege.java
index a55e66b..14aa8df 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/Operation2Privilege.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/Operation2Privilege.java
@@ -421,6 +421,10 @@ public HivePrivilegeObjectType getObjectType() {
         (null, null));
     op2Priv.put(HiveOperationType.DFS, PrivRequirement.newIOPrivRequirement
         (ADMIN_PRIV_AR, ADMIN_PRIV_AR));
+    op2Priv.put(HiveOperationType.LLAP_CLUSTER, PrivRequirement.newIOPrivRequirement
+        (null, null));
+    op2Priv.put(HiveOperationType.LLAP_CACHE, PrivRequirement.newIOPrivRequirement
+        (ADMIN_PRIV_AR, ADMIN_PRIV_AR));
     op2Priv.put(HiveOperationType.RESET, PrivRequirement.newIOPrivRequirement
         (null, null));
     op2Priv.put(HiveOperationType.COMPILE, PrivRequirement.newIOPrivRequirement
diff --git a/service/src/java/org/apache/hive/service/cli/operation/HiveCommandOperation.java b/service/src/java/org/apache/hive/service/cli/operation/HiveCommandOperation.java
index e3a5922..8fbfe76 100644
--- a/service/src/java/org/apache/hive/service/cli/operation/HiveCommandOperation.java
+++ b/service/src/java/org/apache/hive/service/cli/operation/HiveCommandOperation.java
@@ -172,7 +172,7 @@ public RowSet getNextRowSet(FetchOrientation orientation, long maxRows) throws H
     RowSet rowSet = RowSetFactory.create(resultSchema, getProtocolVersion(), false);
     for (String row : rows) {
-      rowSet.addRow(new String[] { row });
+      rowSet.addRow(row.split("\\t"));
     }
     return rowSet;
   }
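The one-line change above is needed because the new LLAP processors emit multi-column rows as tab-joined strings (via Joiner.on("\t")), and HiveCommandOperation now splits them back into columns so clients see the schemas declared by the processors' getSchema() methods. The following sketch is illustrative only, with made-up values that are not taken from the patch.

public class RowSplitSketch {
  public static void main(String[] args) {
    // e.g. a row produced by "llap cache -purge": host name, then purged bytes
    String row = "host-1\t1073741824";
    String[] columns = row.split("\\t"); // the same split getNextRowSet() now applies
    System.out.println(columns.length);  // 2 -> hostName, purgedMemoryBytes
  }
}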