From 5204086bd49075cf93245107984a40b01b713b9f Mon Sep 17 00:00:00 2001 From: Parth Brahmbhatt Date: Wed, 20 May 2015 12:03:15 -0700 Subject: [PATCH 1/8] KAFKA-2210: Kafka authorizer public entities and changes to KafkaAPI and KafkaServer to allow custom authorizer implementation. --- .../kafka/common/AuthorizationException.scala | 25 ++++ .../src/main/scala/kafka/common/ErrorMapping.scala | 8 +- core/src/main/scala/kafka/security/auth/Acl.scala | 117 ++++++++++++++++ .../scala/kafka/security/auth/Authorizer.scala | 83 +++++++++++ .../scala/kafka/security/auth/KafkaPrincipal.scala | 66 +++++++++ .../main/scala/kafka/security/auth/Operation.java | 45 ++++++ .../scala/kafka/security/auth/PermissionType.java | 22 +++ .../main/scala/kafka/security/auth/Resource.scala | 60 ++++++++ .../scala/kafka/security/auth/ResourceType.java | 40 ++++++ core/src/main/scala/kafka/server/KafkaApis.scala | 156 +++++++++++++++++---- core/src/main/scala/kafka/server/KafkaConfig.scala | 18 +++ core/src/main/scala/kafka/server/KafkaServer.scala | 50 ++++--- core/src/test/resources/acl.json | 39 ++++++ .../scala/unit/kafka/security/auth/AclTest.scala | 70 +++++++++ .../kafka/security/auth/KafkaPrincipalTest.scala | 43 ++++++ .../unit/kafka/security/auth/ResourceTest.scala | 43 ++++++ .../kafka/server/KafkaConfigConfigDefTest.scala | 5 + 17 files changed, 837 insertions(+), 53 deletions(-) create mode 100644 core/src/main/scala/kafka/common/AuthorizationException.scala create mode 100644 core/src/main/scala/kafka/security/auth/Acl.scala create mode 100644 core/src/main/scala/kafka/security/auth/Authorizer.scala create mode 100644 core/src/main/scala/kafka/security/auth/KafkaPrincipal.scala create mode 100644 core/src/main/scala/kafka/security/auth/Operation.java create mode 100644 core/src/main/scala/kafka/security/auth/PermissionType.java create mode 100644 core/src/main/scala/kafka/security/auth/Resource.scala create mode 100644 core/src/main/scala/kafka/security/auth/ResourceType.java create mode 100644 core/src/test/resources/acl.json create mode 100644 core/src/test/scala/unit/kafka/security/auth/AclTest.scala create mode 100644 core/src/test/scala/unit/kafka/security/auth/KafkaPrincipalTest.scala create mode 100644 core/src/test/scala/unit/kafka/security/auth/ResourceTest.scala diff --git a/core/src/main/scala/kafka/common/AuthorizationException.scala b/core/src/main/scala/kafka/common/AuthorizationException.scala new file mode 100644 index 0000000..12ee0fe --- /dev/null +++ b/core/src/main/scala/kafka/common/AuthorizationException.scala @@ -0,0 +1,25 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package kafka.common + +/** + * Exception thrown when a principal is not authorized to perform an operation. 
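+ * The broker maps this exception to ErrorMapping.AuthorizationCode (24) so the failure can be reported back to clients in error responses.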
+ * @param message + */ +class AuthorizationException(message: String) extends RuntimeException(message) { + def this() = this(null) +} diff --git a/core/src/main/scala/kafka/common/ErrorMapping.scala b/core/src/main/scala/kafka/common/ErrorMapping.scala index c75c685..5ad3ae5 100644 --- a/core/src/main/scala/kafka/common/ErrorMapping.scala +++ b/core/src/main/scala/kafka/common/ErrorMapping.scala @@ -17,8 +17,10 @@ package kafka.common -import kafka.message.InvalidMessageException import java.nio.ByteBuffer + +import kafka.message.InvalidMessageException + import scala.Predef._ /** @@ -51,6 +53,7 @@ object ErrorMapping { val NotEnoughReplicasAfterAppendCode: Short = 20 // 21: InvalidRequiredAcks // 22: IllegalConsumerGeneration + val AuthorizationCode: Short = 24; private val exceptionToCode = Map[Class[Throwable], Short]( @@ -72,7 +75,8 @@ object ErrorMapping { classOf[InvalidTopicException].asInstanceOf[Class[Throwable]] -> InvalidTopicCode, classOf[MessageSetSizeTooLargeException].asInstanceOf[Class[Throwable]] -> MessageSetSizeTooLargeCode, classOf[NotEnoughReplicasException].asInstanceOf[Class[Throwable]] -> NotEnoughReplicasCode, - classOf[NotEnoughReplicasAfterAppendException].asInstanceOf[Class[Throwable]] -> NotEnoughReplicasAfterAppendCode + classOf[NotEnoughReplicasAfterAppendException].asInstanceOf[Class[Throwable]] -> NotEnoughReplicasAfterAppendCode, + classOf[AuthorizationException].asInstanceOf[Class[Throwable]] -> AuthorizationCode ).withDefaultValue(UnknownCode) /* invert the mapping */ diff --git a/core/src/main/scala/kafka/security/auth/Acl.scala b/core/src/main/scala/kafka/security/auth/Acl.scala new file mode 100644 index 0000000..c062509 --- /dev/null +++ b/core/src/main/scala/kafka/security/auth/Acl.scala @@ -0,0 +1,117 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package kafka.security.auth + +import kafka.utils.Json + +object Acl { + val wildCardPrincipal: KafkaPrincipal = new KafkaPrincipal("user", "*") + val wildCardHost: String = "*" + val allowAllAcl = new Acl(Set[KafkaPrincipal](wildCardPrincipal), PermissionType.ALLOW, Set[String](wildCardHost), Set[Operation](Operation.ALL)) + val PRINCIPAL_KEY = "principal" + val PERMISSION_TYPE_KEY = "permissionType" + val OPERATIONS_KEY = "operations" + val HOSTS_KEY = "hosts" + val VERSION_KEY = "version" + val CURRENT_VERSION = 1 + val ACLS_KEY = "acls" + + def fromJson(aclJson: String): Set[Acl] = { + if(aclJson == null || aclJson.isEmpty) { + return collection.immutable.Set.empty[Acl] + } + var acls: collection.mutable.HashSet[Acl] = new collection.mutable.HashSet[Acl]() + Json.parseFull(aclJson) match { + case Some(m) => + val aclMap = m.asInstanceOf[Map[String, Any]] + //the acl json version. 
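+        //A sketch of the expected input, matching core/src/test/resources/acl.json (values illustrative):
+        //{"version": 1, "acls": [{"principal": ["user:alice"], "permissionType": "ALLOW", "operations": ["READ"], "hosts": ["host1"]}]}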
+        require(aclMap.get(VERSION_KEY).get == CURRENT_VERSION)
+        val aclSet: List[Map[String, Any]] = aclMap.get(ACLS_KEY).get.asInstanceOf[List[Map[String, Any]]]
+        aclSet.foreach(item => {
+          val principals: List[KafkaPrincipal] = item(PRINCIPAL_KEY).asInstanceOf[List[String]].map(principal => KafkaPrincipal.fromString(principal))
+          val permissionType: PermissionType = PermissionType.valueOf(item(PERMISSION_TYPE_KEY).asInstanceOf[String])
+          val operations: List[Operation] = item(OPERATIONS_KEY).asInstanceOf[List[String]].map(operation => Operation.fromString(operation))
+          val hosts: List[String] = item(HOSTS_KEY).asInstanceOf[List[String]]
+          acls += new Acl(principals.toSet, permissionType, hosts.toSet, operations.toSet)
+        })
+      case None =>
+    }
+    return acls.toSet
+  }
+
+  def toJsonCompatibleMap(acls: Set[Acl]): Map[String,Any] = {
+    acls match {
+      case aclSet: Set[Acl] => Map(Acl.VERSION_KEY -> Acl.CURRENT_VERSION, Acl.ACLS_KEY -> aclSet.map(acl => acl.toMap).toList)
+      case _ => null
+    }
+  }
+}
+
+/**
+ * An instance of this class represents an ACL that can express the following statement:
+ *
+ * Principal P has permissionType PT on Operations O1,O2 from hosts H1,H2.
+ * 
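+ * For example (values illustrative): user:alice has ALLOW permission for operations: READ,WRITE from hosts: host1,host2.
+ *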
+ * @param principals A value of *:* indicates all users. + * @param permissionType + * @param hosts A value of * indicates all hosts. + * @param operations A value of ALL indicates all operations. + */ +class Acl(val principals: Set[KafkaPrincipal],val permissionType: PermissionType,val hosts: Set[String],val operations: Set[Operation]) { + + /** + * TODO: Ideally we would have a symmetric toJson method but our current json library fails to decode double parsed json strings so + * convert to map which then gets converted to json. + * Convert an acl instance to a map + * @return Map representation of the Acl. + */ + def toMap() : Map[String, Any] = { + val map: collection.mutable.HashMap[String, Any] = new collection.mutable.HashMap[String, Any]() + map.put(Acl.PRINCIPAL_KEY, principals.map(principal => principal.toString)) + map.put(Acl.PERMISSION_TYPE_KEY, permissionType.name()) + map.put(Acl.OPERATIONS_KEY, operations.map(operation => operation.name())) + map.put(Acl.HOSTS_KEY, hosts) + + map.toMap + } + + override def equals(that: Any): Boolean = { + if(!(that.isInstanceOf[Acl])) + return false + val other = that.asInstanceOf[Acl] + if(permissionType.equals(other.permissionType) && operations.equals(other.operations) && principals.equals(other.principals) + && hosts.map(host => host.toLowerCase()).equals(other.hosts.map(host=> host.toLowerCase()))) { + return true + } + false + } + + + override def hashCode(): Int = { + 31 + + principals.foldLeft(0)((r: Int, c: KafkaPrincipal) => r + c.hashCode()) + + operations.foldLeft(0)((r: Int, c: Operation) => r + c.hashCode()) + + hosts.foldLeft(0)((r: Int, c: String) => r + c.toLowerCase().hashCode()) + } + + override def toString() : String = { + return "%s has %s permission for operations: %s from hosts: %s".format(principals.mkString(","), permissionType.name(), operations.mkString(","), hosts.mkString(",")) + } + +} + diff --git a/core/src/main/scala/kafka/security/auth/Authorizer.scala b/core/src/main/scala/kafka/security/auth/Authorizer.scala new file mode 100644 index 0000000..72ab803 --- /dev/null +++ b/core/src/main/scala/kafka/security/auth/Authorizer.scala @@ -0,0 +1,83 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package kafka.security.auth + +import kafka.network.RequestChannel.Session +import kafka.server.KafkaConfig + +/** + * Top level interface that all plugable authorizer must implement. Kafka server will read "authorizer.class" config + * value at startup time, create an instance of the specified class and call initialize method. + * authorizer.class must be a class that implements this interface. + * If authorizer.class has no value specified no authorization will be performed. 
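+ * For example, a server.properties entry of the form (implementation class illustrative):
+ *   authorizer.class.name=com.example.CustomAuthorizer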
+ * + * From that point onwards, every client request will first be routed to authorize method and the request will only be + * authorized if the method returns true. + */ +trait Authorizer { + /** + * Guaranteed to be called before any authorize call is made. + */ + def initialize(kafkaConfig: KafkaConfig): Unit + + /** + * @param session The session being authenticated. + * @param operation Type of operation client is trying to perform on resource. + * @param resource Resource the client is trying to access. + * @return + */ + def authorize(session: Session, operation: Operation, resource: Resource): Boolean + + /** + * add the acls to resource, this is an additive operation so existing acls will not be overwritten, instead these new + * acls will be added to existing acls. + * @param acls set of acls to add to existing acls + * @param resource the resource to which these acls should be attached. + */ + def addAcls(acls: Set[Acl], resource: Resource): Unit + + /** + * remove these acls from the resource. + * @param acls set of acls to be removed. + * @param resource resource from which the acls should be removed. + * @return true if some acl got removed, false if no acl was removed. + */ + def removeAcls(acls: Set[Acl], resource: Resource): Boolean + + /** + * remove a resource along with all of its acls from acl store. + * @param resource + * @return + */ + def removeAcls(resource: Resource): Boolean + + /** + * get set of acls for this resource + * @param resource + * @return empty set if no acls are found, otherwise the acls for the resource. + */ + def getAcls(resource: Resource): Set[Acl] + + /** + * get the acls for this principal. + * @param principal + * @return empty set if no acls exist for this principal, otherwise the acls for the principal. + */ + def getAcls(principal: KafkaPrincipal): Set[Acl] +} + diff --git a/core/src/main/scala/kafka/security/auth/KafkaPrincipal.scala b/core/src/main/scala/kafka/security/auth/KafkaPrincipal.scala new file mode 100644 index 0000000..21f7d44 --- /dev/null +++ b/core/src/main/scala/kafka/security/auth/KafkaPrincipal.scala @@ -0,0 +1,66 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package kafka.security.auth + +import java.security.Principal + +object KafkaPrincipal { + val seperator: String = ":" + val userType: String = "User" + + def fromString(str: String) : KafkaPrincipal = { + val arr: Array[String] = str.split(seperator) + + if(arr.length != 2) { + throw new IllegalArgumentException("expected a string in format principalType:principalName but got " + str) + } + + new KafkaPrincipal(arr(0), arr(1)) + } +} + +/** + * + * @param principalType type of principal user,unixgroup, ldapgroup. 
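+ *                      (e.g. KafkaPrincipal.fromString("user:alice") yields principalType "user" and name "alice")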
+ * @param name name of the principal + */ +class KafkaPrincipal(val principalType: String,val name: String) extends Principal { + + override def getName: String = { + name + } + + override def toString: String = { + principalType + KafkaPrincipal.seperator + name + } + + override def equals(that: Any): Boolean = { + if(!(that.isInstanceOf[KafkaPrincipal])) + return false + val other: KafkaPrincipal = that.asInstanceOf[KafkaPrincipal] + if(principalType.equalsIgnoreCase(other.principalType) && name.equalsIgnoreCase(other.name)) + return true + false + } + + override def hashCode(): Int = { + 31 + principalType.toLowerCase.hashCode + name.toLowerCase.hashCode + } +} + + + diff --git a/core/src/main/scala/kafka/security/auth/Operation.java b/core/src/main/scala/kafka/security/auth/Operation.java new file mode 100644 index 0000000..517c5e0 --- /dev/null +++ b/core/src/main/scala/kafka/security/auth/Operation.java @@ -0,0 +1,45 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package kafka.security.auth; + +/** + * Different operations a client may perform on kafka resources. + */ +public enum Operation { + READ, + WRITE, + CREATE, + DELETE, + ALTER, + DESCRIBE, + CLUSTER_ACTION, + ALL; + + /** + * method defined for case insensitive check. the default valueOf() method is case sensitive + */ + public static Operation fromString(String operationName) { + if(operationName != null) { + for(Operation operation: Operation.values()) { + if(operationName.equalsIgnoreCase(operation.name())) { + return operation; + } + } + } + throw new IllegalArgumentException("no matching enum value found for " + operationName); + } +} diff --git a/core/src/main/scala/kafka/security/auth/PermissionType.java b/core/src/main/scala/kafka/security/auth/PermissionType.java new file mode 100644 index 0000000..b844d41 --- /dev/null +++ b/core/src/main/scala/kafka/security/auth/PermissionType.java @@ -0,0 +1,22 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package kafka.security.auth; + +public enum PermissionType { + ALLOW, + DENY +} diff --git a/core/src/main/scala/kafka/security/auth/Resource.scala b/core/src/main/scala/kafka/security/auth/Resource.scala new file mode 100644 index 0000000..37fd88b --- /dev/null +++ b/core/src/main/scala/kafka/security/auth/Resource.scala @@ -0,0 +1,60 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package kafka.security.auth + +object Resource { + val separator: String = ":" + val clusterResourceName: String = "kafka-cluster" + val clusterResource: Resource = new Resource(ResourceType.CLUSTER,Resource.clusterResourceName) + + def fromString(str: String) : Resource = { + val arr: Array[String] = str.split(separator) + + if(arr.length != 2) { + throw new IllegalArgumentException("expected a string in format resourceType:name but got " + str + ".allowed resource types are" + ResourceType.values()) + } + + new Resource(ResourceType.fromString(arr(0)), arr(1)) + } +} + +/** + * + * @param resourceType type of resource. + * @param name name of the resource, for topic this will be topic name , for group it will be group name. For cluster type + * it will be a constant string kafka-cluster. + */ +class Resource(val resourceType: ResourceType,val name: String) { + + override def toString: String = { + resourceType.name() + Resource.separator + name + } + + override def equals(that: Any): Boolean = { + if(!(that.isInstanceOf[Resource])) + return false + val other: Resource = that.asInstanceOf[Resource] + if(resourceType.equals(other.resourceType) && name.equalsIgnoreCase(other.name)) + return true + false + } + + override def hashCode(): Int = { + 31 + resourceType.hashCode + name.toLowerCase.hashCode + } +} + diff --git a/core/src/main/scala/kafka/security/auth/ResourceType.java b/core/src/main/scala/kafka/security/auth/ResourceType.java new file mode 100644 index 0000000..70ed1a6 --- /dev/null +++ b/core/src/main/scala/kafka/security/auth/ResourceType.java @@ -0,0 +1,40 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package kafka.security.auth; + +/** + * ResourceTypes. + */ +public enum ResourceType { + CLUSTER, + TOPIC, + CONSUMER_GROUP; + + /** + * method defined for case insensitive check. the default valueOf() method is case sensitive + */ + public static ResourceType fromString(String resourceType) { + if(resourceType != null) { + for(ResourceType rType: ResourceType.values()) { + if(resourceType.equalsIgnoreCase(rType.name())) { + return rType; + } + } + } + throw new IllegalArgumentException("no matching enum value found for " + resourceType); + } +} diff --git a/core/src/main/scala/kafka/server/KafkaApis.scala b/core/src/main/scala/kafka/server/KafkaApis.scala index 18f5b5b..13dffce 100644 --- a/core/src/main/scala/kafka/server/KafkaApis.scala +++ b/core/src/main/scala/kafka/server/KafkaApis.scala @@ -17,20 +17,23 @@ package kafka.server -import org.apache.kafka.common.protocol.SecurityProtocol -import org.apache.kafka.common.TopicPartition -import kafka.api._ import kafka.admin.AdminUtils +import kafka.api._ import kafka.common._ import kafka.controller.KafkaController import kafka.coordinator.ConsumerCoordinator import kafka.log._ -import kafka.network._ +import kafka.message.MessageSet import kafka.network.RequestChannel.Response -import org.apache.kafka.common.requests.{JoinGroupRequest, JoinGroupResponse, HeartbeatRequest, HeartbeatResponse, ResponseHeader, ResponseSend} -import kafka.utils.{ZkUtils, ZKGroupTopicDirs, SystemTime, Logging} -import scala.collection._ +import kafka.network._ +import kafka.security.auth.{Authorizer, Operation, Resource, ResourceType} +import kafka.utils.{Logging, SystemTime, ZKGroupTopicDirs, ZkUtils} import org.I0Itec.zkclient.ZkClient +import org.apache.kafka.common.TopicPartition +import org.apache.kafka.common.protocol.SecurityProtocol +import org.apache.kafka.common.requests.{HeartbeatRequest, HeartbeatResponse, JoinGroupRequest, JoinGroupResponse, ResponseHeader, ResponseSend} + +import scala.collection._ /** * Logic to handle the various Kafka requests @@ -42,7 +45,8 @@ class KafkaApis(val requestChannel: RequestChannel, val zkClient: ZkClient, val brokerId: Int, val config: KafkaConfig, - val metadataCache: MetadataCache) extends Logging { + val metadataCache: MetadataCache, + val authorizer: Option[Authorizer]) extends Logging { this.logIdent = "[KafkaApi-%d] ".format(brokerId) @@ -93,6 +97,13 @@ class KafkaApis(val requestChannel: RequestChannel, // We can't have the ensureTopicExists check here since the controller sends it as an advisory to all brokers so they // stop serving data to clients for the topic being deleted val leaderAndIsrRequest = request.requestObj.asInstanceOf[LeaderAndIsrRequest] + + if(authorizer.isDefined && !authorizer.get.authorize(request.session, Operation.CLUSTER_ACTION, Resource.clusterResource)) { + val leaderAndIsrResponse = new LeaderAndIsrResponse(leaderAndIsrRequest.correlationId, Map.empty, ErrorMapping.AuthorizationCode) + requestChannel.sendResponse(new Response(request, new RequestOrResponseSend(request.connectionId, leaderAndIsrResponse))) + return + } + try { // call replica manager to handle updating partitions to become leader or follower val result = replicaManager.becomeLeaderOrFollower(leaderAndIsrRequest) @@ -124,6 +135,13 @@ class KafkaApis(val requestChannel: RequestChannel, // We can't have the ensureTopicExists check here since the controller sends it as an advisory to all brokers so they 
// stop serving data to clients for the topic being deleted val stopReplicaRequest = request.requestObj.asInstanceOf[StopReplicaRequest] + + if(authorizer.isDefined && !authorizer.get.authorize(request.session, Operation.CLUSTER_ACTION, Resource.clusterResource)) { + val stopReplicaResponse = new StopReplicaResponse(stopReplicaRequest.correlationId, Map.empty, ErrorMapping.AuthorizationCode) + requestChannel.sendResponse(new Response(request, new RequestOrResponseSend(request.connectionId, stopReplicaResponse))) + return + } + val (response, error) = replicaManager.stopReplicas(stopReplicaRequest) val stopReplicaResponse = new StopReplicaResponse(stopReplicaRequest.correlationId, response.toMap, error) requestChannel.sendResponse(new Response(request, new RequestOrResponseSend(request.connectionId, stopReplicaResponse))) @@ -132,6 +150,18 @@ class KafkaApis(val requestChannel: RequestChannel, def handleUpdateMetadataRequest(request: RequestChannel.Request) { val updateMetadataRequest = request.requestObj.asInstanceOf[UpdateMetadataRequest] + + if(authorizer.isDefined) { + val unauthorizedTopicAndPartition = updateMetadataRequest.partitionStateInfos.filterKeys( + topicAndPartition => !authorizer.get.authorize(request.session, Operation.CLUSTER_ACTION, Resource.clusterResource)).keys + //In this case the response does not allow to selectively report success/failure so if authorization fails, we fail the entire request. + if (unauthorizedTopicAndPartition.nonEmpty) { + val updateMetadataResponse = new UpdateMetadataResponse(updateMetadataRequest.correlationId, ErrorMapping.AuthorizationCode) + requestChannel.sendResponse(new Response(request, new RequestOrResponseSend(request.connectionId, updateMetadataResponse))) + return + } + } + replicaManager.maybeUpdateMetadataCache(updateMetadataRequest, metadataCache) val updateMetadataResponse = new UpdateMetadataResponse(updateMetadataRequest.correlationId) @@ -143,6 +173,13 @@ class KafkaApis(val requestChannel: RequestChannel, // We can't have the ensureTopicExists check here since the controller sends it as an advisory to all brokers so they // stop serving data to clients for the topic being deleted val controlledShutdownRequest = request.requestObj.asInstanceOf[ControlledShutdownRequest] + + if(authorizer.isDefined && !authorizer.get.authorize(request.session, Operation.CLUSTER_ACTION, Resource.clusterResource)) { + val controlledShutdownResponse = new ControlledShutdownResponse(controlledShutdownRequest.correlationId, ErrorMapping.AuthorizationCode, Set.empty) + requestChannel.sendResponse(new Response(request, new RequestOrResponseSend(request.connectionId, controlledShutdownResponse))) + return + } + val partitionsRemaining = controller.shutdownBroker(controlledShutdownRequest.brokerId) val controlledShutdownResponse = new ControlledShutdownResponse(controlledShutdownRequest.correlationId, ErrorMapping.NoError, partitionsRemaining) @@ -162,26 +199,32 @@ class KafkaApis(val requestChannel: RequestChannel, } val filteredRequestInfo = (offsetCommitRequest.requestInfo -- invalidRequestsInfo.keys) + val (authorizedRequestInfo, unauthorizedRequestInfo) = filteredRequestInfo.partition( + mapEntry => !authorizer.isDefined || (authorizer.get.authorize(request.session, Operation.READ, new Resource(ResourceType.TOPIC,mapEntry._1.topic)) && + authorizer.get.authorize(request.session, Operation.READ, new Resource(ResourceType.CONSUMER_GROUP,offsetCommitRequest.groupId)))) + // the callback for sending an offset commit response def 
sendResponseCallback(commitStatus: immutable.Map[TopicAndPartition, Short]) { - commitStatus.foreach { case (topicAndPartition, errorCode) => + val mergedCommitStatus = commitStatus ++ unauthorizedRequestInfo.mapValues(_ => ErrorMapping.AuthorizationCode) + + mergedCommitStatus.foreach { case (topicAndPartition, errorCode) => // we only print warnings for known errors here; only replica manager could see an unknown // exception while trying to write the offset message to the local log, and it will log // an error message and write the error code in this case; hence it can be ignored here if (errorCode != ErrorMapping.NoError && errorCode != ErrorMapping.UnknownCode) { debug("Offset commit request with correlation id %d from client %s on partition %s failed due to %s" .format(offsetCommitRequest.correlationId, offsetCommitRequest.clientId, - topicAndPartition, ErrorMapping.exceptionNameFor(errorCode))) + topicAndPartition, ErrorMapping.exceptionNameFor(errorCode))) } } - val combinedCommitStatus = commitStatus ++ invalidRequestsInfo.map(_._1 -> ErrorMapping.UnknownTopicOrPartitionCode) + val combinedCommitStatus = mergedCommitStatus ++ invalidRequestsInfo.map(_._1 -> ErrorMapping.UnknownTopicOrPartitionCode) val response = OffsetCommitResponse(combinedCommitStatus, offsetCommitRequest.correlationId) requestChannel.sendResponse(new RequestChannel.Response(request, new RequestOrResponseSend(request.connectionId, response))) } if (offsetCommitRequest.versionId == 0) { // for version 0 always store offsets to ZK - val responseInfo = filteredRequestInfo.map { + val responseInfo = authorizedRequestInfo.map { case (topicAndPartition, metaAndError) => { val topicDirs = new ZKGroupTopicDirs(offsetCommitRequest.groupId, topicAndPartition.topic) try { @@ -222,8 +265,7 @@ class KafkaApis(val requestChannel: RequestChannel, // - If v2 we use the default expiration timestamp val currentTimestamp = SystemTime.milliseconds val defaultExpireTimestamp = offsetRetention + currentTimestamp - - val offsetData = filteredRequestInfo.mapValues(offsetAndMetadata => + val offsetData = authorizedRequestInfo.mapValues(offsetAndMetadata => offsetAndMetadata.copy( commitTimestamp = currentTimestamp, expireTimestamp = { @@ -251,10 +293,15 @@ class KafkaApis(val requestChannel: RequestChannel, def handleProducerRequest(request: RequestChannel.Request) { val produceRequest = request.requestObj.asInstanceOf[ProducerRequest] + val (authorizedRequestInfo, unauthorizedRequestInfo) = produceRequest.data.partition( + mapEntry => !authorizer.isDefined || authorizer.get.authorize(request.session, Operation.WRITE, new Resource(ResourceType.TOPIC,mapEntry._1.topic))) + // the callback for sending a produce response def sendResponseCallback(responseStatus: Map[TopicAndPartition, ProducerResponseStatus]) { var errorInResponse = false - responseStatus.foreach { case (topicAndPartition, status) => + val mergedResponseStatus = responseStatus ++ unauthorizedRequestInfo.mapValues(_ => ProducerResponseStatus(ErrorMapping.AuthorizationCode, -1)) + + mergedResponseStatus.foreach { case (topicAndPartition, status) => // we only print warnings for known errors here; if it is unknown, it will cause // an error message in the replica manager if (status.error != ErrorMapping.NoError && status.error != ErrorMapping.UnknownCode) { @@ -277,7 +324,7 @@ class KafkaApis(val requestChannel: RequestChannel, requestChannel.noOperation(request.processor, request) } } else { - val response = ProducerResponse(produceRequest.correlationId, responseStatus) + 
val response = ProducerResponse(produceRequest.correlationId, mergedResponseStatus) requestChannel.sendResponse(new RequestChannel.Response(request, new RequestOrResponseSend(request.connectionId, response))) } } @@ -291,7 +338,7 @@ class KafkaApis(val requestChannel: RequestChannel, produceRequest.ackTimeoutMs.toLong, produceRequest.requiredAcks, internalTopicsAllowed, - produceRequest.data, + authorizedRequestInfo, sendResponseCallback) // if the request is put into the purgatory, it will have a held reference @@ -306,9 +353,16 @@ class KafkaApis(val requestChannel: RequestChannel, def handleFetchRequest(request: RequestChannel.Request) { val fetchRequest = request.requestObj.asInstanceOf[FetchRequest] + val (authorizedRequestInfo, unauthorizedRequestInfo) = fetchRequest.requestInfo.partition( + mapEntry => !authorizer.isDefined || authorizer.get.authorize(request.session, Operation.READ, new Resource(ResourceType.TOPIC,mapEntry._1.topic))) + + val unauthorizedResponseStatus = unauthorizedRequestInfo.mapValues(_ => FetchResponsePartitionData(ErrorMapping.AuthorizationCode, -1, MessageSet.Empty)) + // the callback for sending a fetch response def sendResponseCallback(responsePartitionData: Map[TopicAndPartition, FetchResponsePartitionData]) { - responsePartitionData.foreach { case (topicAndPartition, data) => + val mergedResponseStatus = responsePartitionData ++ unauthorizedResponseStatus + + mergedResponseStatus.foreach { case (topicAndPartition, data) => // we only print warnings for known errors here; if it is unknown, it will cause // an error message in the replica manager already and hence can be ignored here if (data.error != ErrorMapping.NoError && data.error != ErrorMapping.UnknownCode) { @@ -331,7 +385,7 @@ class KafkaApis(val requestChannel: RequestChannel, fetchRequest.maxWait.toLong, fetchRequest.replicaId, fetchRequest.minBytes, - fetchRequest.requestInfo, + authorizedRequestInfo, sendResponseCallback) } @@ -340,7 +394,13 @@ class KafkaApis(val requestChannel: RequestChannel, */ def handleOffsetRequest(request: RequestChannel.Request) { val offsetRequest = request.requestObj.asInstanceOf[OffsetRequest] - val responseMap = offsetRequest.requestInfo.map(elem => { + + val (authorizedRequestInfo, unauthorizedRequestInfo) = offsetRequest.requestInfo.partition( + mapEntry => !authorizer.isDefined || authorizer.get.authorize(request.session, Operation.DESCRIBE, new Resource(ResourceType.TOPIC,mapEntry._1.topic))) + + val unauthorizedResponseStatus = unauthorizedRequestInfo.mapValues(_ => PartitionOffsetsResponse(ErrorMapping.AuthorizationCode, Nil)) + + val responseMap = authorizedRequestInfo.map(elem => { val (topicAndPartition, partitionOffsetRequestInfo) = elem try { // ensure leader exists @@ -380,7 +440,9 @@ class KafkaApis(val requestChannel: RequestChannel, (topicAndPartition, PartitionOffsetsResponse(ErrorMapping.codeFor(e.getClass.asInstanceOf[Class[Throwable]]), Nil) ) } }) - val response = OffsetResponse(offsetRequest.correlationId, responseMap) + + val mergedResponseMap = responseMap ++ unauthorizedResponseStatus + val response = OffsetResponse(offsetRequest.correlationId, mergedResponseMap) requestChannel.sendResponse(new RequestChannel.Response(request, new RequestOrResponseSend(request.connectionId, response))) } @@ -480,22 +542,35 @@ class KafkaApis(val requestChannel: RequestChannel, */ def handleTopicMetadataRequest(request: RequestChannel.Request) { val metadataRequest = request.requestObj.asInstanceOf[TopicMetadataRequest] - val topicMetadata = 
getTopicMetadata(metadataRequest.topics.toSet, request.securityProtocol) + val topics = metadataRequest.topics.toSet + + val (authorizedTopics, unauthorizedTopics) = topics.partition(topic => !authorizer.isDefined || authorizer.get.authorize(request.session, Operation.DESCRIBE, new Resource(ResourceType.TOPIC,topic))) + + val unauthorizedTopicMetaData = unauthorizedTopics.map(topic => new TopicMetadata(topic, Seq.empty[PartitionMetadata], ErrorMapping.AuthorizationCode)) + + val topicMetadata = getTopicMetadata(authorizedTopics, request.securityProtocol) val brokers = metadataCache.getAliveBrokers trace("Sending topic metadata %s and brokers %s for correlation id %d to client %s".format(topicMetadata.mkString(","), brokers.mkString(","), metadataRequest.correlationId, metadataRequest.clientId)) - val response = new TopicMetadataResponse(brokers.map(_.getBrokerEndPoint(request.securityProtocol)), topicMetadata, metadataRequest.correlationId) + val response = new TopicMetadataResponse(brokers.map(_.getBrokerEndPoint(request.securityProtocol)), topicMetadata ++ unauthorizedTopicMetaData, metadataRequest.correlationId) requestChannel.sendResponse(new RequestChannel.Response(request, new RequestOrResponseSend(request.connectionId, response))) } /* * Handle an offset fetch request */ + def handleOffsetFetchRequest(request: RequestChannel.Request) { val offsetFetchRequest = request.requestObj.asInstanceOf[OffsetFetchRequest] + val (authorizedTopicPartitions, unauthorizedTopicPartitions) = offsetFetchRequest.requestInfo.partition( + topicAndPartition => !authorizer.isDefined || authorizer.get.authorize(request.session, Operation.DESCRIBE, new Resource(ResourceType.TOPIC,topicAndPartition.topic))) + + val authorizationError = OffsetMetadataAndError(OffsetMetadata.InvalidOffsetMetadata, ErrorMapping.AuthorizationCode) + val unauthorizedStatus = unauthorizedTopicPartitions.map(topicAndPartition => (topicAndPartition, authorizationError)).toMap + val response = if (offsetFetchRequest.versionId == 0) { // version 0 reads offsets from ZK - val responseInfo = offsetFetchRequest.requestInfo.map( topicAndPartition => { + val responseInfo = authorizedTopicPartitions.map( topicAndPartition => { val topicDirs = new ZKGroupTopicDirs(offsetFetchRequest.groupId, topicAndPartition.topic) try { if (metadataCache.getTopicMetadata(Set(topicAndPartition.topic), request.securityProtocol).size <= 0) { @@ -514,15 +589,17 @@ class KafkaApis(val requestChannel: RequestChannel, } }) - OffsetFetchResponse(collection.immutable.Map(responseInfo: _*), offsetFetchRequest.correlationId) + val unauthorizedTopics = unauthorizedTopicPartitions.map( topicAndPartition => + (topicAndPartition, OffsetMetadataAndError(OffsetMetadata.InvalidOffsetMetadata,ErrorMapping.AuthorizationCode))) + OffsetFetchResponse(collection.immutable.Map(responseInfo: _*) ++ unauthorizedTopics, offsetFetchRequest.correlationId) } else { // version 1 reads offsets from Kafka; - val offsets = coordinator.handleFetchOffsets(offsetFetchRequest.groupId, offsetFetchRequest.requestInfo).toMap + val offsets = coordinator.handleFetchOffsets(offsetFetchRequest.groupId, authorizedTopicPartitions).toMap // Note that we do not need to filter the partitions in the // metadata cache as the topic partitions will be filtered // in coordinator's offset manager through the offset cache - OffsetFetchResponse(offsets, offsetFetchRequest.correlationId) + OffsetFetchResponse(offsets ++ unauthorizedStatus, offsetFetchRequest.correlationId) } trace("Sending offset fetch response %s 
for correlation id %d to client %s." @@ -539,6 +616,12 @@ class KafkaApis(val requestChannel: RequestChannel, val partition = coordinator.partitionFor(consumerMetadataRequest.group) + if (authorizer.isDefined && !authorizer.get.authorize(request.session, Operation.DESCRIBE, new Resource(ResourceType.TOPIC, ConsumerCoordinator.OffsetsTopicName))) { + val errorResponse = ConsumerMetadataResponse(None, ErrorMapping.ConsumerCoordinatorNotAvailableCode, consumerMetadataRequest.correlationId) + requestChannel.sendResponse(new RequestChannel.Response(request, new RequestOrResponseSend(request.connectionId, errorResponse))) + return + } + // get metadata (and create the topic if necessary) val offsetsTopicMetadata = getTopicMetadata(Set(ConsumerCoordinator.OffsetsTopicName), request.securityProtocol).head @@ -562,10 +645,17 @@ class KafkaApis(val requestChannel: RequestChannel, val joinGroupRequest = request.body.asInstanceOf[JoinGroupRequest] val respHeader = new ResponseHeader(request.header.correlationId) + val (authorizedTopics, unauthorizedTopics) = joinGroupRequest.topics().partition( + topic => (!authorizer.isDefined || authorizer.get.authorize(request.session, Operation.READ, new Resource(ResourceType.TOPIC, topic)) + && authorizer.get.authorize(request.session, Operation.READ, new Resource(ResourceType.CONSUMER_GROUP, joinGroupRequest.groupId())))) + + val unauthorizedTopicPartition = unauthorizedTopics.map(topic => new TopicPartition(topic, -1)) + // the callback for sending a join-group response def sendResponseCallback(partitions: Set[TopicAndPartition], consumerId: String, generationId: Int, errorCode: Short) { - val partitionList = partitions.map(tp => new TopicPartition(tp.topic, tp.partition)).toBuffer - val responseBody = new JoinGroupResponse(errorCode, generationId, consumerId, partitionList) + val partitionList = (partitions.map(tp => new TopicPartition(tp.topic, tp.partition)) ++ unauthorizedTopicPartition).toBuffer + val error = if (errorCode == ErrorMapping.NoError && unauthorizedTopicPartition.nonEmpty) ErrorMapping.AuthorizationCode else errorCode + val responseBody = new JoinGroupResponse(error, generationId, consumerId, partitionList) trace("Sending join group response %s for correlation id %d to client %s." 
.format(responseBody, request.header.correlationId, request.header.clientId)) requestChannel.sendResponse(new RequestChannel.Response(request, new ResponseSend(request.connectionId, respHeader, responseBody))) @@ -575,7 +665,7 @@ class KafkaApis(val requestChannel: RequestChannel, coordinator.handleJoinGroup( joinGroupRequest.groupId(), joinGroupRequest.consumerId(), - joinGroupRequest.topics().toSet, + authorizedTopics.toSet, joinGroupRequest.sessionTimeout(), joinGroupRequest.strategy(), sendResponseCallback) @@ -585,6 +675,12 @@ class KafkaApis(val requestChannel: RequestChannel, val heartbeatRequest = request.body.asInstanceOf[HeartbeatRequest] val respHeader = new ResponseHeader(request.header.correlationId) + if (authorizer.isDefined && !authorizer.get.authorize(request.session, Operation.READ, new Resource(ResourceType.CONSUMER_GROUP, heartbeatRequest.groupId()))) { + val heartbeatResponse = new HeartbeatResponse(ErrorMapping.AuthorizationCode) + requestChannel.sendResponse(new Response(request, new ResponseSend(request.connectionId, respHeader, heartbeatResponse))) + return + } + // the callback for sending a heartbeat response def sendResponseCallback(errorCode: Short) { val response = new HeartbeatResponse(errorCode) diff --git a/core/src/main/scala/kafka/server/KafkaConfig.scala b/core/src/main/scala/kafka/server/KafkaConfig.scala index dbe170f..d7fc6b0 100755 --- a/core/src/main/scala/kafka/server/KafkaConfig.scala +++ b/core/src/main/scala/kafka/server/KafkaConfig.scala @@ -45,6 +45,10 @@ object Defaults { val BackgroundThreads = 10 val QueuedMaxRequests = 500 + /************* Authorizer Configuration ***********/ + val AuthorizerClassName = "" + val SuperUser = "" + /** ********* Socket Server Configuration ***********/ val Port = 9092 val HostName: String = new String("") @@ -159,6 +163,9 @@ object KafkaConfig { val NumIoThreadsProp = "num.io.threads" val BackgroundThreadsProp = "background.threads" val QueuedMaxRequestsProp = "queued.max.requests" + /************* Authorizer Configuration ***********/ + val AuthorizerClassNameProp = "authorizer.class.name" + val SuperUserProp = "super.users" /** ********* Socket Server Configuration ***********/ val PortProp = "port" val HostNameProp = "host.name" @@ -274,6 +281,9 @@ object KafkaConfig { val NumIoThreadsDoc = "The number of io threads that the server uses for carrying out network requests" val BackgroundThreadsDoc = "The number of threads to use for various background processing tasks" val QueuedMaxRequestsDoc = "The number of queued requests allowed before blocking the network threads" + /************* Authorizer Configuration ***********/ + val AuthorizerClassNameDoc = "The authorizer class that should be used for authorization" + val SuperUserDoc = "Comma seperated list of users that will have super user access to the cluster and all the topics." /** ********* Socket Server Configuration ***********/ val PortDoc = "the port to listen and accept connections on" val HostNameDoc = "hostname of broker. If this is set, it will only bind to this address. 
If this is not set, it will bind to all interfaces"
@@ -420,6 +430,10 @@ object KafkaConfig {
       .define(BackgroundThreadsProp, INT, Defaults.BackgroundThreads, atLeast(1), HIGH, BackgroundThreadsDoc)
       .define(QueuedMaxRequestsProp, INT, Defaults.QueuedMaxRequests, atLeast(1), HIGH, QueuedMaxRequestsDoc)
 
+      /************* Authorizer Configuration ***********/
+      .define(AuthorizerClassNameProp, STRING, Defaults.AuthorizerClassName, LOW, AuthorizerClassNameDoc)
+      .define(SuperUserProp, STRING, Defaults.SuperUser, LOW, SuperUserDoc)
+
       /** ********* Socket Server Configuration ***********/
       .define(PortProp, INT, Defaults.Port, HIGH, PortDoc)
       .define(HostNameProp, STRING, Defaults.HostName, HIGH, HostNameDoc)
@@ -568,6 +582,10 @@ case class KafkaConfig (props: java.util.Map[_, _]) extends AbstractConfig(Kafka
   val numIoThreads = getInt(KafkaConfig.NumIoThreadsProp)
   val messageMaxBytes = getInt(KafkaConfig.MessageMaxBytesProp)
 
+  /************* Authorizer Configuration ***********/
+  val authorizerClassName: String = getString(KafkaConfig.AuthorizerClassNameProp)
+  val superUser: String = getString(KafkaConfig.SuperUserProp)
+
   /** ********* Socket Server Configuration ***********/
   val hostName = getString(KafkaConfig.HostNameProp)
   val port = getInt(KafkaConfig.PortProp)
diff --git a/core/src/main/scala/kafka/server/KafkaServer.scala b/core/src/main/scala/kafka/server/KafkaServer.scala
index 18917bc..d74d2e2 100755
--- a/core/src/main/scala/kafka/server/KafkaServer.scala
+++ b/core/src/main/scala/kafka/server/KafkaServer.scala
@@ -17,31 +17,29 @@ package kafka.server
+import java.io.File
 import java.util
-import java.util.Properties
-
-import kafka.admin._
-import kafka.log.LogConfig
-import kafka.log.CleanerConfig
-import kafka.log.LogManager
 import java.util.concurrent._
-import atomic.{AtomicInteger, AtomicBoolean}
-import java.io.File
+import java.util.concurrent.atomic.{AtomicBoolean, AtomicInteger}
+
+import com.yammer.metrics.core.Gauge
+import kafka.admin._
+import kafka.api.{ControlledShutdownRequest, ControlledShutdownResponse}
+import kafka.cluster.{Broker, EndPoint}
+import kafka.common.{ErrorMapping, GenerateBrokerIdException, InconsistentBrokerIdException}
+import kafka.controller.{ControllerStats, KafkaController}
+import kafka.coordinator.ConsumerCoordinator
+import kafka.log.{CleanerConfig, LogConfig, LogManager}
+import kafka.metrics.KafkaMetricsGroup
+import kafka.network.{BlockingChannel, SocketServer}
+import kafka.security.auth.Authorizer
 import kafka.utils._
+import org.I0Itec.zkclient.ZkClient
 import org.apache.kafka.common.metrics._
 import org.apache.kafka.common.network.NetworkReceive
-import scala.collection.{JavaConversions, mutable}
-import org.I0Itec.zkclient.ZkClient
-import kafka.controller.{ControllerStats, KafkaController}
-import kafka.cluster.{EndPoint, Broker}
-import kafka.api.{ControlledShutdownResponse, ControlledShutdownRequest}
-import kafka.common.{ErrorMapping, InconsistentBrokerIdException, GenerateBrokerIdException}
-import kafka.network.{BlockingChannel, SocketServer}
-import kafka.metrics.KafkaMetricsGroup
-import com.yammer.metrics.core.Gauge
-import kafka.coordinator.{GroupManagerConfig, ConsumerCoordinator}
+import scala.collection.mutable
+
 
 /**
  * Represents the lifecycle of a single Kafka broker.
Handles all functionality required @@ -88,8 +86,6 @@ class KafkaServer(val config: KafkaConfig, time: Time = SystemTime) extends Logg var kafkaHealthcheck: KafkaHealthcheck = null val metadataCache: MetadataCache = new MetadataCache(config.brokerId) - - var zkClient: ZkClient = null val correlationId: AtomicInteger = new AtomicInteger(0) val brokerMetaPropsFile = "meta.properties" @@ -163,9 +159,21 @@ class KafkaServer(val config: KafkaConfig, time: Time = SystemTime) extends Logg consumerCoordinator = ConsumerCoordinator.create(config, zkClient, replicaManager, kafkaScheduler) consumerCoordinator.startup() + + /* Get the authorizer and initialize it if one is specified.*/ + val authorizer: Option[Authorizer] = if(config.authorizerClassName != null && !config.authorizerClassName.isEmpty) { + val authZ: Authorizer = CoreUtils.createObject(config.authorizerClassName) + authZ.initialize(config) + Option(authZ) + } else { + None + } + /* start processing requests */ apis = new KafkaApis(socketServer.requestChannel, replicaManager, consumerCoordinator, - kafkaController, zkClient, config.brokerId, config, metadataCache) + kafkaController, zkClient, config.brokerId, config, metadataCache, authorizer) + + requestHandlerPool = new KafkaRequestHandlerPool(config.brokerId, socketServer.requestChannel, apis, config.numIoThreads) brokerState.newState(RunningAsBroker) diff --git a/core/src/test/resources/acl.json b/core/src/test/resources/acl.json new file mode 100644 index 0000000..97710b3 --- /dev/null +++ b/core/src/test/resources/acl.json @@ -0,0 +1,39 @@ +{ + "version": 1, + "acls": [ + { + "hosts": [ + "host1", + "host2" + ], + "permissionType": "DENY", + "operations": [ + "READ", + "WRITE" + ], + "principal": ["user:alice", "user:bob"] + }, + { + "hosts": [ + "*" + ], + "permissionType": "ALLOW", + "operations": [ + "READ", + "WRITE" + ], + "principal": ["user:bob"] + }, + { + "hosts": [ + "host1", + "host2" + ], + "permissionType": "DENY", + "operations": [ + "read" + ], + "principal": ["user:bob"] + } + ] +} \ No newline at end of file diff --git a/core/src/test/scala/unit/kafka/security/auth/AclTest.scala b/core/src/test/scala/unit/kafka/security/auth/AclTest.scala new file mode 100644 index 0000000..a48fbce --- /dev/null +++ b/core/src/test/scala/unit/kafka/security/auth/AclTest.scala @@ -0,0 +1,70 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package unit.kafka.security.auth + +import kafka.security.auth.{Acl, KafkaPrincipal, Operation, PermissionType} +import kafka.utils.Json +import org.junit.Assert +import org.scalatest.junit.JUnit3Suite + +class AclTest extends JUnit3Suite { + + def testAclJsonConversion(): Unit = { + val acl1: Acl = new Acl(Set(new KafkaPrincipal(KafkaPrincipal.userType, "alice"), new KafkaPrincipal(KafkaPrincipal.userType, "bob")), PermissionType.DENY, Set[String]("host1","host2"), Set[Operation](Operation.READ, Operation.WRITE)) + val acl2: Acl = new Acl(Set(new KafkaPrincipal(KafkaPrincipal.userType, "bob")), PermissionType.ALLOW, Set[String]("*"), Set[Operation](Operation.READ, Operation.WRITE)) + val acl3: Acl = new Acl(Set(new KafkaPrincipal(KafkaPrincipal.userType, "bob")), PermissionType.DENY, Set[String]("host1","host2"), Set[Operation](Operation.READ)) + + val acls: Set[Acl] = Set[Acl](acl1, acl2, acl3) + val jsonAcls: String = Json.encode(Acl.toJsonCompatibleMap(acls)) + Assert.assertEquals(acls, Acl.fromJson(jsonAcls)) + + //test json by reading from a local file. + val path: String = Thread.currentThread().getContextClassLoader.getResource("acl.json").getPath + val source = scala.io.Source.fromFile(path) + Assert.assertEquals(acls, Acl.fromJson(source.mkString)) + source.close() + } + + def testEqualsAndHashCode(): Unit = { + //check equals is not sensitive to case or order for principal,hosts or operations. + val acl1: Acl = new Acl(Set(new KafkaPrincipal(KafkaPrincipal.userType, "bob"), new KafkaPrincipal(KafkaPrincipal.userType, "alice")), PermissionType.ALLOW, Set[String]("host1", "host2"), Set[Operation](Operation.READ, Operation.WRITE)) + val acl2: Acl = new Acl(Set(new KafkaPrincipal("USER", "ALICE"), new KafkaPrincipal(KafkaPrincipal.userType, "bob")), PermissionType.ALLOW, Set[String]("HOST2", "HOST1"), Set[Operation](Operation.WRITE, Operation.READ)) + + Assert.assertEquals(acl1, acl2) + Assert.assertEquals(acl1.hashCode(), acl2.hashCode()) + + //if user does not match returns false + val acl3: Acl = new Acl(Set(new KafkaPrincipal(KafkaPrincipal.userType, "alice")), PermissionType.ALLOW, Set[String]("host1", "host2"), Set[Operation](Operation.READ, Operation.WRITE)) + val acl4: Acl = new Acl(Set(new KafkaPrincipal("USER", "Bob")), PermissionType.ALLOW, Set[String]("HOST1","HOST2"), Set[Operation](Operation.READ, Operation.WRITE)) + Assert.assertFalse(acl3.equals(acl4)) + + //if permission does not match return false + val acl5: Acl = new Acl(Set(new KafkaPrincipal(KafkaPrincipal.userType, "alice")), PermissionType.DENY, Set[String]("host1", "host2"), Set[Operation](Operation.READ, Operation.WRITE)) + val acl6: Acl = new Acl(Set(new KafkaPrincipal(KafkaPrincipal.userType, "alice")), PermissionType.ALLOW, Set[String]("HOST1","HOST2"), Set[Operation](Operation.READ, Operation.WRITE)) + Assert.assertFalse(acl5.equals(acl6)) + + //if hosts do not match return false + val acl7: Acl = new Acl(Set(new KafkaPrincipal(KafkaPrincipal.userType, "alice")), PermissionType.ALLOW, Set[String]("host10", "HOST2"), Set[Operation](Operation.READ, Operation.WRITE)) + val acl8: Acl = new Acl(Set(new KafkaPrincipal(KafkaPrincipal.userType, "alice")), PermissionType.ALLOW, Set[String]("HOST1","HOST2"), Set[Operation](Operation.READ, Operation.WRITE)) + Assert.assertFalse(acl7.equals(acl8)) + + //if Opoerations do not match return false + val acl9: Acl = new Acl(Set(new KafkaPrincipal(KafkaPrincipal.userType, "bob")), PermissionType.ALLOW, Set[String]("host1", "host2"), Set[Operation](Operation.READ, 
Operation.WRITE)) + val acl10: Acl = new Acl(Set(new KafkaPrincipal(KafkaPrincipal.userType, "bob")), PermissionType.ALLOW, Set[String]("HOST1","HOST2"), Set[Operation](Operation.READ, Operation.DESCRIBE)) + Assert.assertFalse(acl9.equals(acl10)) + } +} diff --git a/core/src/test/scala/unit/kafka/security/auth/KafkaPrincipalTest.scala b/core/src/test/scala/unit/kafka/security/auth/KafkaPrincipalTest.scala new file mode 100644 index 0000000..d6ecb06 --- /dev/null +++ b/core/src/test/scala/unit/kafka/security/auth/KafkaPrincipalTest.scala @@ -0,0 +1,43 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package unit.kafka.security.auth + +import kafka.security.auth.KafkaPrincipal +import org.junit.Assert +import org.scalatest.junit.JUnit3Suite + +class KafkaPrincipalTest extends JUnit3Suite { + + def testEqualsAndHashCode(): Unit = { + //check equals is not sensitive to case. + val principal1:KafkaPrincipal = KafkaPrincipal.fromString("user:test") + val principal2:KafkaPrincipal = KafkaPrincipal.fromString("USER:TEST") + + Assert.assertEquals(principal1, principal2) + Assert.assertEquals(principal1.hashCode(), principal2.hashCode()) + + //if name does not match returns false + val principal3:KafkaPrincipal = KafkaPrincipal.fromString("user:test") + val principal4:KafkaPrincipal = KafkaPrincipal.fromString("user:test1") + Assert.assertFalse(principal3.equals(principal4)) + + //if type does not match return false + val principal5:KafkaPrincipal = KafkaPrincipal.fromString("user:test") + val principal6:KafkaPrincipal = KafkaPrincipal.fromString("group:test") + Assert.assertFalse(principal5.equals(principal6)) + } +} diff --git a/core/src/test/scala/unit/kafka/security/auth/ResourceTest.scala b/core/src/test/scala/unit/kafka/security/auth/ResourceTest.scala new file mode 100644 index 0000000..0a21416 --- /dev/null +++ b/core/src/test/scala/unit/kafka/security/auth/ResourceTest.scala @@ -0,0 +1,43 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package unit.kafka.security.auth + +import kafka.security.auth.Resource +import kafka.security.auth.ResourceType +import org.junit.Assert +import org.scalatest.junit.JUnit3Suite + +class ResourceTest extends JUnit3Suite { + + def testEqualsAndHashCode(): Unit = { + //check equals is not sensitive to case. + val resource1: Resource = Resource.fromString(ResourceType.TOPIC.name().toLowerCase + ":test") + val resource2: Resource = Resource.fromString(ResourceType.TOPIC.name().toUpperCase() + ":TEST") + Assert.assertEquals(resource1, resource2) + Assert.assertEquals(resource1.hashCode(), resource2.hashCode()) + + val resource3: Resource = Resource.fromString(ResourceType.TOPIC.name() + ":test") + val resource4: Resource = Resource.fromString(ResourceType.TOPIC.name() + ":TEST1") + //if name does not match returns false + Assert.assertFalse(resource3.equals(resource4)) + + //if type does not match return false + val resource5: Resource = Resource.fromString(ResourceType.TOPIC.name() + ":test") + val resource6: Resource = Resource.fromString(ResourceType.CONSUMER_GROUP.name() + ":test") + Assert.assertFalse(resource5.equals(resource6)) + } +} diff --git a/core/src/test/scala/unit/kafka/server/KafkaConfigConfigDefTest.scala b/core/src/test/scala/unit/kafka/server/KafkaConfigConfigDefTest.scala index 04a02e0..6864084 100644 --- a/core/src/test/scala/unit/kafka/server/KafkaConfigConfigDefTest.scala +++ b/core/src/test/scala/unit/kafka/server/KafkaConfigConfigDefTest.scala @@ -264,6 +264,11 @@ class KafkaConfigConfigDefTest extends JUnit3Suite { case KafkaConfig.BackgroundThreadsProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number", "0") case KafkaConfig.QueuedMaxRequestsProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number", "0") + case KafkaConfig.AuthorizerClassNameProp => // ignore string + case KafkaConfig.SuperUserProp => //ignore string + case KafkaConfig.PrincipalToLocalProp => //ignore string + case KafkaConfig.AuthorizerConfigPathProp => //ignore string + case KafkaConfig.PortProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number") case KafkaConfig.HostNameProp => // ignore string case KafkaConfig.AdvertisedHostNameProp => //ignore string -- 2.1.3.36.g8e36a6d From aab12b166425ec5bfd9c767a7533c3d9f3b8c744 Mon Sep 17 00:00:00 2001 From: Parth Brahmbhatt Date: Wed, 3 Jun 2015 16:32:08 -0700 Subject: [PATCH 2/8] Addressing review comments from Jun. 
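For context, the ACL JSON wiring that this patch revises can be exercised end to end with a small round trip; a minimal sketch using only APIs from this series (the principal, host and operation values are illustrative):

    import kafka.security.auth.{Acl, KafkaPrincipal, Operation, PermissionType}
    import kafka.utils.Json

    // Encode a set of ACLs into the versioned json-compatible map and parse it
    // back; fromJson should reproduce the original set (see AclTest below).
    val acl = new Acl(Set(new KafkaPrincipal(KafkaPrincipal.UserType, "alice")),
      PermissionType.ALLOW, Set[String]("host1"), Set[Operation](Operation.READ))
    val jsonAcls = Json.encode(Acl.toJsonCompatibleMap(Set(acl)))
    assert(Acl.fromJson(jsonAcls) == Set(acl))

The version field in the encoded map guards against future format changes; fromJson currently requires version 1.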
--- core/src/main/scala/kafka/api/OffsetRequest.scala | 2 +- core/src/main/scala/kafka/security/auth/Acl.scala | 62 +++++++++++++------ .../scala/kafka/security/auth/KafkaPrincipal.scala | 11 ++-- .../main/scala/kafka/security/auth/Resource.scala | 12 ++-- core/src/main/scala/kafka/server/KafkaApis.scala | 71 +++++++++++----------- core/src/test/resources/acl.json | 6 +- .../scala/unit/kafka/security/auth/AclTest.scala | 24 ++++---- .../kafka/server/KafkaConfigConfigDefTest.scala | 2 - 8 files changed, 110 insertions(+), 80 deletions(-) diff --git a/core/src/main/scala/kafka/api/OffsetRequest.scala b/core/src/main/scala/kafka/api/OffsetRequest.scala index f418868..d2c1c95 100644 --- a/core/src/main/scala/kafka/api/OffsetRequest.scala +++ b/core/src/main/scala/kafka/api/OffsetRequest.scala @@ -115,7 +115,7 @@ case class OffsetRequest(requestInfo: Map[TopicAndPartition, PartitionOffsetRequ override def handleError(e: Throwable, requestChannel: RequestChannel, request: RequestChannel.Request): Unit = { val partitionOffsetResponseMap = requestInfo.map { case (topicAndPartition, partitionOffsetRequest) => - (topicAndPartition, PartitionOffsetsResponse(ErrorMapping.codeFor(e.getClass.asInstanceOf[Class[Throwable]]), null)) + (topicAndPartition, PartitionOffsetsResponse(ErrorMapping.codeFor(e.getClass.asInstanceOf[Class[Throwable]]), Nil)) } val errorResponse = OffsetResponse(correlationId, partitionOffsetResponseMap) requestChannel.sendResponse(new Response(request, new RequestOrResponseSend(request.connectionId, errorResponse))) diff --git a/core/src/main/scala/kafka/security/auth/Acl.scala b/core/src/main/scala/kafka/security/auth/Acl.scala index c062509..809c6e3 100644 --- a/core/src/main/scala/kafka/security/auth/Acl.scala +++ b/core/src/main/scala/kafka/security/auth/Acl.scala @@ -23,14 +23,40 @@ object Acl { val wildCardPrincipal: KafkaPrincipal = new KafkaPrincipal("user", "*") val wildCardHost: String = "*" val allowAllAcl = new Acl(Set[KafkaPrincipal](wildCardPrincipal), PermissionType.ALLOW, Set[String](wildCardHost), Set[Operation](Operation.ALL)) - val PRINCIPAL_KEY = "principal" - val PERMISSION_TYPE_KEY = "permissionType" - val OPERATIONS_KEY = "operations" - val HOSTS_KEY = "hosts" - val VERSION_KEY = "version" - val CURRENT_VERSION = 1 - val ACLS_KEY = "acls" + val principalKey = "principals" + val permissionTypeKey = "permissionType" + val operationKey = "operations" + val hostsKey = "hosts" + val versionKey = "version" + val currentVersion = 1 + val aclsKey = "acls" + /** + * + * @param aclJson + * + *

+ { + "version": 1, + "acls": [ + { + "hosts": [ + "host1", + "host2" + ], + "permissionType": "DENY", + "operations": [ + "READ", + "WRITE" + ], + "principal": ["user:alice", "user:bob"] + } + ] + } + *

+ * + * @return + */ def fromJson(aclJson: String): Set[Acl] = { if(aclJson == null || aclJson.isEmpty) { return collection.immutable.Set.empty[Acl] @@ -40,13 +66,13 @@ object Acl { case Some(m) => val aclMap = m.asInstanceOf[Map[String, Any]] //the acl json version. - require(aclMap.get(VERSION_KEY).get == CURRENT_VERSION) - val aclSet: List[Map[String, Any]] = aclMap.get(ACLS_KEY).get.asInstanceOf[List[Map[String, Any]]] + require(aclMap(versionKey) == currentVersion) + val aclSet: List[Map[String, Any]] = aclMap.get(aclsKey).get.asInstanceOf[List[Map[String, Any]]] aclSet.foreach(item => { - val principals: List[KafkaPrincipal] = item(PRINCIPAL_KEY).asInstanceOf[List[String]].map(principal => KafkaPrincipal.fromString(principal)) - val permissionType: PermissionType = PermissionType.valueOf(item(PERMISSION_TYPE_KEY).asInstanceOf[String]) - val operations: List[Operation] = item(OPERATIONS_KEY).asInstanceOf[List[String]].map(operation => Operation.fromString(operation)) - val hosts: List[String] = item(HOSTS_KEY).asInstanceOf[List[String]] + val principals: List[KafkaPrincipal] = item(principalKey).asInstanceOf[List[String]].map(principal => KafkaPrincipal.fromString(principal)) + val permissionType: PermissionType = PermissionType.valueOf(item(permissionTypeKey).asInstanceOf[String]) + val operations: List[Operation] = item(operationKey).asInstanceOf[List[String]].map(operation => Operation.fromString(operation)) + val hosts: List[String] = item(hostsKey).asInstanceOf[List[String]] acls += new Acl(principals.toSet, permissionType, hosts.toSet, operations.toSet) }) case None => @@ -56,7 +82,7 @@ object Acl { def toJsonCompatibleMap(acls: Set[Acl]): Map[String,Any] = { acls match { - case aclSet: Set[Acl] => Map(Acl.VERSION_KEY -> Acl.CURRENT_VERSION, Acl.ACLS_KEY -> aclSet.map(acl => acl.toMap).toList) + case aclSet: Set[Acl] => Map(Acl.versionKey -> Acl.currentVersion, Acl.aclsKey -> aclSet.map(acl => acl.toMap).toList) case _ => null } } @@ -82,10 +108,10 @@ class Acl(val principals: Set[KafkaPrincipal],val permissionType: PermissionType */ def toMap() : Map[String, Any] = { val map: collection.mutable.HashMap[String, Any] = new collection.mutable.HashMap[String, Any]() - map.put(Acl.PRINCIPAL_KEY, principals.map(principal => principal.toString)) - map.put(Acl.PERMISSION_TYPE_KEY, permissionType.name()) - map.put(Acl.OPERATIONS_KEY, operations.map(operation => operation.name())) - map.put(Acl.HOSTS_KEY, hosts) + map.put(Acl.principalKey, principals.map(principal => principal.toString)) + map.put(Acl.permissionTypeKey, permissionType.name()) + map.put(Acl.operationKey, operations.map(operation => operation.name())) + map.put(Acl.hostsKey, hosts) map.toMap } diff --git a/core/src/main/scala/kafka/security/auth/KafkaPrincipal.scala b/core/src/main/scala/kafka/security/auth/KafkaPrincipal.scala index 21f7d44..246d940 100644 --- a/core/src/main/scala/kafka/security/auth/KafkaPrincipal.scala +++ b/core/src/main/scala/kafka/security/auth/KafkaPrincipal.scala @@ -19,11 +19,11 @@ package kafka.security.auth import java.security.Principal object KafkaPrincipal { - val seperator: String = ":" - val userType: String = "User" + val Seperator: String = ":" + val UserType: String = "User" def fromString(str: String) : KafkaPrincipal = { - val arr: Array[String] = str.split(seperator) + val arr: Array[String] = str.split(Seperator) if(arr.length != 2) { throw new IllegalArgumentException("expected a string in format principalType:principalName but got " + str) @@ -40,12 +40,15 @@ object KafkaPrincipal 
{
  */
 class KafkaPrincipal(val principalType: String,val name: String) extends Principal {
+
+  if(principalType == null || name == null)
+    throw new IllegalArgumentException("principalType and name cannot be null")
+
   override def getName: String = {
     name
   }
 
   override def toString: String = {
-    principalType + KafkaPrincipal.seperator + name
+    principalType + KafkaPrincipal.Seperator + name
   }
 
   override def equals(that: Any): Boolean = {
diff --git a/core/src/main/scala/kafka/security/auth/Resource.scala b/core/src/main/scala/kafka/security/auth/Resource.scala
index 37fd88b..df81385 100644
--- a/core/src/main/scala/kafka/security/auth/Resource.scala
+++ b/core/src/main/scala/kafka/security/auth/Resource.scala
@@ -17,15 +17,15 @@ package kafka.security.auth
 
 object Resource {
-  val separator: String = ":"
-  val clusterResourceName: String = "kafka-cluster"
-  val clusterResource: Resource = new Resource(ResourceType.CLUSTER,Resource.clusterResourceName)
+  val Separator: String = ":"
+  val ClusterResourceName: String = "kafka-cluster"
+  val ClusterResource: Resource = new Resource(ResourceType.CLUSTER,Resource.ClusterResourceName)
 
   def fromString(str: String) : Resource = {
-    val arr: Array[String] = str.split(separator)
+    val arr: Array[String] = str.split(Separator)
 
     if(arr.length != 2) {
-      throw new IllegalArgumentException("expected a string in format resourceType:name but got " + str + ".allowed resource types are" + ResourceType.values())
+      throw new IllegalArgumentException("Expected a string in format ResourceType:Name but got " + str + ". Allowed resource types are " + ResourceType.values())
     }
 
     new Resource(ResourceType.fromString(arr(0)), arr(1))
@@ -41,7 +41,7 @@ object Resource {
 class Resource(val resourceType: ResourceType,val name: String) {
 
   override def toString: String = {
-    resourceType.name() + Resource.separator + name
+    resourceType.name() + Resource.Separator + name
   }
 
   override def equals(that: Any): Boolean = {
diff --git a/core/src/main/scala/kafka/server/KafkaApis.scala b/core/src/main/scala/kafka/server/KafkaApis.scala
index 13dffce..914f1b5 100644
--- a/core/src/main/scala/kafka/server/KafkaApis.scala
+++ b/core/src/main/scala/kafka/server/KafkaApis.scala
@@ -98,10 +98,8 @@ class KafkaApis(val requestChannel: RequestChannel,
     // stop serving data to clients for the topic being deleted
     val leaderAndIsrRequest = request.requestObj.asInstanceOf[LeaderAndIsrRequest]
 
-    if(authorizer.isDefined && !authorizer.get.authorize(request.session, Operation.CLUSTER_ACTION, Resource.clusterResource)) {
-      val leaderAndIsrResponse = new LeaderAndIsrResponse(leaderAndIsrRequest.correlationId, Map.empty, ErrorMapping.AuthorizationCode)
-      requestChannel.sendResponse(new Response(request, new RequestOrResponseSend(request.connectionId, leaderAndIsrResponse)))
-      return
+    if(authorizer.isDefined && !authorizer.get.authorize(request.session, Operation.CLUSTER_ACTION, Resource.ClusterResource)) {
+      throw new AuthorizationException("Request " + request + " is not authorized.")
     }
 
     try {
@@ -136,10 +134,8 @@ class KafkaApis(val requestChannel: RequestChannel,
     // stop serving data to clients for the topic being deleted
     val stopReplicaRequest = request.requestObj.asInstanceOf[StopReplicaRequest]
 
-    if(authorizer.isDefined && !authorizer.get.authorize(request.session, Operation.CLUSTER_ACTION, Resource.clusterResource)) {
-      val stopReplicaResponse = new StopReplicaResponse(stopReplicaRequest.correlationId, Map.empty, ErrorMapping.AuthorizationCode)
-      requestChannel.sendResponse(new Response(request, new
RequestOrResponseSend(request.connectionId, stopReplicaResponse))) - return + if(authorizer.isDefined && !authorizer.get.authorize(request.session, Operation.CLUSTER_ACTION, Resource.ClusterResource)) { + throw new AuthorizationException("Request " + request + " is not authorized.") } val (response, error) = replicaManager.stopReplicas(stopReplicaRequest) @@ -151,15 +147,8 @@ class KafkaApis(val requestChannel: RequestChannel, def handleUpdateMetadataRequest(request: RequestChannel.Request) { val updateMetadataRequest = request.requestObj.asInstanceOf[UpdateMetadataRequest] - if(authorizer.isDefined) { - val unauthorizedTopicAndPartition = updateMetadataRequest.partitionStateInfos.filterKeys( - topicAndPartition => !authorizer.get.authorize(request.session, Operation.CLUSTER_ACTION, Resource.clusterResource)).keys - //In this case the response does not allow to selectively report success/failure so if authorization fails, we fail the entire request. - if (unauthorizedTopicAndPartition.nonEmpty) { - val updateMetadataResponse = new UpdateMetadataResponse(updateMetadataRequest.correlationId, ErrorMapping.AuthorizationCode) - requestChannel.sendResponse(new Response(request, new RequestOrResponseSend(request.connectionId, updateMetadataResponse))) - return - } + if(authorizer.isDefined && !authorizer.get.authorize(request.session, Operation.CLUSTER_ACTION, Resource.ClusterResource)) { + throw new AuthorizationException("Request " + request + " is not authorized.") } replicaManager.maybeUpdateMetadataCache(updateMetadataRequest, metadataCache) @@ -174,10 +163,8 @@ class KafkaApis(val requestChannel: RequestChannel, // stop serving data to clients for the topic being deleted val controlledShutdownRequest = request.requestObj.asInstanceOf[ControlledShutdownRequest] - if(authorizer.isDefined && !authorizer.get.authorize(request.session, Operation.CLUSTER_ACTION, Resource.clusterResource)) { - val controlledShutdownResponse = new ControlledShutdownResponse(controlledShutdownRequest.correlationId, ErrorMapping.AuthorizationCode, Set.empty) - requestChannel.sendResponse(new Response(request, new RequestOrResponseSend(request.connectionId, controlledShutdownResponse))) - return + if(authorizer.isDefined && !authorizer.get.authorize(request.session, Operation.CLUSTER_ACTION, Resource.ClusterResource)) { + throw new AuthorizationException("Request " + request + " is not authorized.") } val partitionsRemaining = controller.shutdownBroker(controlledShutdownRequest.brokerId) @@ -293,8 +280,9 @@ class KafkaApis(val requestChannel: RequestChannel, def handleProducerRequest(request: RequestChannel.Request) { val produceRequest = request.requestObj.asInstanceOf[ProducerRequest] - val (authorizedRequestInfo, unauthorizedRequestInfo) = produceRequest.data.partition( - mapEntry => !authorizer.isDefined || authorizer.get.authorize(request.session, Operation.WRITE, new Resource(ResourceType.TOPIC,mapEntry._1.topic))) + val (authorizedRequestInfo, unauthorizedRequestInfo) = produceRequest.data.partition { + case (topicAndPartition, _) => !authorizer.isDefined || authorizer.get.authorize(request.session, Operation.WRITE, new Resource(ResourceType.TOPIC,topicAndPartition.topic)) + } // the callback for sending a produce response def sendResponseCallback(responseStatus: Map[TopicAndPartition, ProducerResponseStatus]) { @@ -353,8 +341,9 @@ class KafkaApis(val requestChannel: RequestChannel, def handleFetchRequest(request: RequestChannel.Request) { val fetchRequest = request.requestObj.asInstanceOf[FetchRequest] - val 
(authorizedRequestInfo, unauthorizedRequestInfo) = fetchRequest.requestInfo.partition( - mapEntry => !authorizer.isDefined || authorizer.get.authorize(request.session, Operation.READ, new Resource(ResourceType.TOPIC,mapEntry._1.topic))) + val (authorizedRequestInfo, unauthorizedRequestInfo) = fetchRequest.requestInfo.partition { + case (topicAndPartition, _) => !authorizer.isDefined || authorizer.get.authorize(request.session, Operation.READ, new Resource(ResourceType.TOPIC, topicAndPartition.topic)) + } val unauthorizedResponseStatus = unauthorizedRequestInfo.mapValues(_ => FetchResponsePartitionData(ErrorMapping.AuthorizationCode, -1, MessageSet.Empty)) @@ -395,8 +384,9 @@ class KafkaApis(val requestChannel: RequestChannel, def handleOffsetRequest(request: RequestChannel.Request) { val offsetRequest = request.requestObj.asInstanceOf[OffsetRequest] - val (authorizedRequestInfo, unauthorizedRequestInfo) = offsetRequest.requestInfo.partition( - mapEntry => !authorizer.isDefined || authorizer.get.authorize(request.session, Operation.DESCRIBE, new Resource(ResourceType.TOPIC,mapEntry._1.topic))) + val (authorizedRequestInfo, unauthorizedRequestInfo) = offsetRequest.requestInfo.partition { + case (topicAndPartition, _) => !authorizer.isDefined || authorizer.get.authorize(request.session, Operation.DESCRIBE, new Resource(ResourceType.TOPIC, topicAndPartition.topic)) + } val unauthorizedResponseStatus = unauthorizedRequestInfo.mapValues(_ => PartitionOffsetsResponse(ErrorMapping.AuthorizationCode, Nil)) @@ -544,7 +534,16 @@ class KafkaApis(val requestChannel: RequestChannel, val metadataRequest = request.requestObj.asInstanceOf[TopicMetadataRequest] val topics = metadataRequest.topics.toSet - val (authorizedTopics, unauthorizedTopics) = topics.partition(topic => !authorizer.isDefined || authorizer.get.authorize(request.session, Operation.DESCRIBE, new Resource(ResourceType.TOPIC,topic))) + var (authorizedTopics, unauthorizedTopics) = topics.partition(topic => !authorizer.isDefined || authorizer.get.authorize(request.session, Operation.DESCRIBE, new Resource(ResourceType.TOPIC,topic))) + + val topicResponses = metadataCache.getTopicMetadata(authorizedTopics, request.securityProtocol) + if(config.autoCreateTopicsEnable && topicResponses.size != authorizedTopics.size) { + val nonExistentTopics: Set[String] = topics -- topicResponses.map(_.topic).toSet + if (authorizer.isDefined && !authorizer.get.authorize(request.session, Operation.CREATE, Resource.ClusterResource)) { + authorizedTopics --= nonExistentTopics + unauthorizedTopics ++= nonExistentTopics + } + } val unauthorizedTopicMetaData = unauthorizedTopics.map(topic => new TopicMetadata(topic, Seq.empty[PartitionMetadata], ErrorMapping.AuthorizationCode)) @@ -563,7 +562,8 @@ class KafkaApis(val requestChannel: RequestChannel, val offsetFetchRequest = request.requestObj.asInstanceOf[OffsetFetchRequest] val (authorizedTopicPartitions, unauthorizedTopicPartitions) = offsetFetchRequest.requestInfo.partition( - topicAndPartition => !authorizer.isDefined || authorizer.get.authorize(request.session, Operation.DESCRIBE, new Resource(ResourceType.TOPIC,topicAndPartition.topic))) + topicAndPartition => !authorizer.isDefined || (authorizer.get.authorize(request.session, Operation.DESCRIBE, new Resource(ResourceType.TOPIC, topicAndPartition.topic)) + && authorizer.get.authorize(request.session, Operation.READ, new Resource(ResourceType.CONSUMER_GROUP, offsetFetchRequest.groupId)))) val authorizationError = 
OffsetMetadataAndError(OffsetMetadata.InvalidOffsetMetadata, ErrorMapping.AuthorizationCode) val unauthorizedStatus = unauthorizedTopicPartitions.map(topicAndPartition => (topicAndPartition, authorizationError)).toMap @@ -616,13 +616,16 @@ class KafkaApis(val requestChannel: RequestChannel, val partition = coordinator.partitionFor(consumerMetadataRequest.group) - if (authorizer.isDefined && !authorizer.get.authorize(request.session, Operation.DESCRIBE, new Resource(ResourceType.TOPIC, ConsumerCoordinator.OffsetsTopicName))) { - val errorResponse = ConsumerMetadataResponse(None, ErrorMapping.ConsumerCoordinatorNotAvailableCode, consumerMetadataRequest.correlationId) - requestChannel.sendResponse(new RequestChannel.Response(request, new RequestOrResponseSend(request.connectionId, errorResponse))) - return + if (authorizer.isDefined && !authorizer.get.authorize(request.session, Operation.READ, new Resource(ResourceType.CONSUMER_GROUP, consumerMetadataRequest.group))) { + throw new AuthorizationException("Request " + consumerMetadataRequest + " is not authorized to read from consumer group" + consumerMetadataRequest.group ) + } + + if(metadataCache.getTopicMetadata(Set(ConsumerCoordinator.OffsetsTopicName), request.securityProtocol).isEmpty && + authorizer.isDefined && !authorizer.get.authorize(request.session, Operation.CREATE, Resource.ClusterResource)) { + throw new AuthorizationException("Request " + consumerMetadataRequest + " is not authorized to create " + ConsumerCoordinator.OffsetsTopicName) } - // get metadata (and create the topic if necessary) + //get metadata (and create the topic if necessary) val offsetsTopicMetadata = getTopicMetadata(Set(ConsumerCoordinator.OffsetsTopicName), request.securityProtocol).head val errorResponse = ConsumerMetadataResponse(None, ErrorMapping.ConsumerCoordinatorNotAvailableCode, consumerMetadataRequest.correlationId) diff --git a/core/src/test/resources/acl.json b/core/src/test/resources/acl.json index 97710b3..ae7fbf8 100644 --- a/core/src/test/resources/acl.json +++ b/core/src/test/resources/acl.json @@ -11,7 +11,7 @@ "READ", "WRITE" ], - "principal": ["user:alice", "user:bob"] + "principals": ["user:alice", "user:bob"] }, { "hosts": [ @@ -22,7 +22,7 @@ "READ", "WRITE" ], - "principal": ["user:bob"] + "principals": ["user:bob"] }, { "hosts": [ @@ -33,7 +33,7 @@ "operations": [ "read" ], - "principal": ["user:bob"] + "principals": ["user:bob"] } ] } \ No newline at end of file diff --git a/core/src/test/scala/unit/kafka/security/auth/AclTest.scala b/core/src/test/scala/unit/kafka/security/auth/AclTest.scala index a48fbce..9bb504a 100644 --- a/core/src/test/scala/unit/kafka/security/auth/AclTest.scala +++ b/core/src/test/scala/unit/kafka/security/auth/AclTest.scala @@ -24,9 +24,9 @@ import org.scalatest.junit.JUnit3Suite class AclTest extends JUnit3Suite { def testAclJsonConversion(): Unit = { - val acl1: Acl = new Acl(Set(new KafkaPrincipal(KafkaPrincipal.userType, "alice"), new KafkaPrincipal(KafkaPrincipal.userType, "bob")), PermissionType.DENY, Set[String]("host1","host2"), Set[Operation](Operation.READ, Operation.WRITE)) - val acl2: Acl = new Acl(Set(new KafkaPrincipal(KafkaPrincipal.userType, "bob")), PermissionType.ALLOW, Set[String]("*"), Set[Operation](Operation.READ, Operation.WRITE)) - val acl3: Acl = new Acl(Set(new KafkaPrincipal(KafkaPrincipal.userType, "bob")), PermissionType.DENY, Set[String]("host1","host2"), Set[Operation](Operation.READ)) + val acl1: Acl = new Acl(Set(new KafkaPrincipal(KafkaPrincipal.UserType, "alice"), new 
KafkaPrincipal(KafkaPrincipal.UserType, "bob")), PermissionType.DENY, Set[String]("host1","host2"), Set[Operation](Operation.READ, Operation.WRITE))
+    val acl2: Acl = new Acl(Set(new KafkaPrincipal(KafkaPrincipal.UserType, "bob")), PermissionType.ALLOW, Set[String]("*"), Set[Operation](Operation.READ, Operation.WRITE))
+    val acl3: Acl = new Acl(Set(new KafkaPrincipal(KafkaPrincipal.UserType, "bob")), PermissionType.DENY, Set[String]("host1","host2"), Set[Operation](Operation.READ))
 
     val acls: Set[Acl] = Set[Acl](acl1, acl2, acl3)
     val jsonAcls: String = Json.encode(Acl.toJsonCompatibleMap(acls))
@@ -41,30 +41,30 @@ class AclTest extends JUnit3Suite {
 
   def testEqualsAndHashCode(): Unit = {
     //check equals is not sensitive to case or order for principal, hosts or operations.
-    val acl1: Acl = new Acl(Set(new KafkaPrincipal(KafkaPrincipal.userType, "bob"), new KafkaPrincipal(KafkaPrincipal.userType, "alice")), PermissionType.ALLOW, Set[String]("host1", "host2"), Set[Operation](Operation.READ, Operation.WRITE))
-    val acl2: Acl = new Acl(Set(new KafkaPrincipal("USER", "ALICE"), new KafkaPrincipal(KafkaPrincipal.userType, "bob")), PermissionType.ALLOW, Set[String]("HOST2", "HOST1"), Set[Operation](Operation.WRITE, Operation.READ))
+    val acl1: Acl = new Acl(Set(new KafkaPrincipal(KafkaPrincipal.UserType, "bob"), new KafkaPrincipal(KafkaPrincipal.UserType, "alice")), PermissionType.ALLOW, Set[String]("host1", "host2"), Set[Operation](Operation.READ, Operation.WRITE))
+    val acl2: Acl = new Acl(Set(new KafkaPrincipal("USER", "ALICE"), new KafkaPrincipal(KafkaPrincipal.UserType, "bob")), PermissionType.ALLOW, Set[String]("HOST2", "HOST1"), Set[Operation](Operation.WRITE, Operation.READ))
 
     Assert.assertEquals(acl1, acl2)
     Assert.assertEquals(acl1.hashCode(), acl2.hashCode())
 
     //if user does not match returns false
-    val acl3: Acl = new Acl(Set(new KafkaPrincipal(KafkaPrincipal.userType, "alice")), PermissionType.ALLOW, Set[String]("host1", "host2"), Set[Operation](Operation.READ, Operation.WRITE))
+    val acl3: Acl = new Acl(Set(new KafkaPrincipal(KafkaPrincipal.UserType, "alice")), PermissionType.ALLOW, Set[String]("host1", "host2"), Set[Operation](Operation.READ, Operation.WRITE))
     val acl4: Acl = new Acl(Set(new KafkaPrincipal("USER", "Bob")), PermissionType.ALLOW, Set[String]("HOST1","HOST2"), Set[Operation](Operation.READ, Operation.WRITE))
     Assert.assertFalse(acl3.equals(acl4))
 
     //if permission does not match return false
-    val acl5: Acl = new Acl(Set(new KafkaPrincipal(KafkaPrincipal.userType, "alice")), PermissionType.DENY, Set[String]("host1", "host2"), Set[Operation](Operation.READ, Operation.WRITE))
-    val acl6: Acl = new Acl(Set(new KafkaPrincipal(KafkaPrincipal.userType, "alice")), PermissionType.ALLOW, Set[String]("HOST1","HOST2"), Set[Operation](Operation.READ, Operation.WRITE))
+    val acl5: Acl = new Acl(Set(new KafkaPrincipal(KafkaPrincipal.UserType, "alice")), PermissionType.DENY, Set[String]("host1", "host2"), Set[Operation](Operation.READ, Operation.WRITE))
+    val acl6: Acl = new Acl(Set(new KafkaPrincipal(KafkaPrincipal.UserType, "alice")), PermissionType.ALLOW, Set[String]("HOST1","HOST2"), Set[Operation](Operation.READ, Operation.WRITE))
     Assert.assertFalse(acl5.equals(acl6))
 
     //if hosts do not match return false
-    val acl7: Acl = new Acl(Set(new KafkaPrincipal(KafkaPrincipal.userType, "alice")), PermissionType.ALLOW, Set[String]("host10", "HOST2"), Set[Operation](Operation.READ, Operation.WRITE))
-    val acl8: Acl = new Acl(Set(new KafkaPrincipal(KafkaPrincipal.userType, "alice")),
PermissionType.ALLOW, Set[String]("HOST1","HOST2"), Set[Operation](Operation.READ, Operation.WRITE))
+    val acl7: Acl = new Acl(Set(new KafkaPrincipal(KafkaPrincipal.UserType, "alice")), PermissionType.ALLOW, Set[String]("host10", "HOST2"), Set[Operation](Operation.READ, Operation.WRITE))
+    val acl8: Acl = new Acl(Set(new KafkaPrincipal(KafkaPrincipal.UserType, "alice")), PermissionType.ALLOW, Set[String]("HOST1","HOST2"), Set[Operation](Operation.READ, Operation.WRITE))
     Assert.assertFalse(acl7.equals(acl8))
 
     //if operations do not match return false
-    val acl9: Acl = new Acl(Set(new KafkaPrincipal(KafkaPrincipal.userType, "bob")), PermissionType.ALLOW, Set[String]("host1", "host2"), Set[Operation](Operation.READ, Operation.WRITE))
-    val acl10: Acl = new Acl(Set(new KafkaPrincipal(KafkaPrincipal.userType, "bob")), PermissionType.ALLOW, Set[String]("HOST1","HOST2"), Set[Operation](Operation.READ, Operation.DESCRIBE))
+    val acl9: Acl = new Acl(Set(new KafkaPrincipal(KafkaPrincipal.UserType, "bob")), PermissionType.ALLOW, Set[String]("host1", "host2"), Set[Operation](Operation.READ, Operation.WRITE))
+    val acl10: Acl = new Acl(Set(new KafkaPrincipal(KafkaPrincipal.UserType, "bob")), PermissionType.ALLOW, Set[String]("HOST1","HOST2"), Set[Operation](Operation.READ, Operation.DESCRIBE))
     Assert.assertFalse(acl9.equals(acl10))
   }
 }
diff --git a/core/src/test/scala/unit/kafka/server/KafkaConfigConfigDefTest.scala b/core/src/test/scala/unit/kafka/server/KafkaConfigConfigDefTest.scala
index 6864084..4062eee 100644
--- a/core/src/test/scala/unit/kafka/server/KafkaConfigConfigDefTest.scala
+++ b/core/src/test/scala/unit/kafka/server/KafkaConfigConfigDefTest.scala
@@ -266,8 +266,6 @@ class KafkaConfigConfigDefTest extends JUnit3Suite {
       case KafkaConfig.AuthorizerClassNameProp => // ignore string
       case KafkaConfig.SuperUserProp => //ignore string
-      case KafkaConfig.PrincipalToLocalProp => //ignore string
-      case KafkaConfig.AuthorizerConfigPathProp => //ignore string
 
       case KafkaConfig.PortProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number")
       case KafkaConfig.HostNameProp => // ignore string
-- 
2.1.3.36.g8e36a6d

From c0a3a8272527d7b366f3d3cc0f13b002dc2ada7b Mon Sep 17 00:00:00 2001
From: Parth Brahmbhatt
Date: Thu, 4 Jun 2015 16:06:33 -0700
Subject: [PATCH 3/8] Adding the CREATE check for the offsets topic only if the topic does not already exist.
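The authorization guards in KafkaApis all follow the same shape; a condensed sketch of the pattern this patch extends (the helper name is illustrative, and the Session import path is an assumption rather than taken from this series):

    import kafka.common.AuthorizationException
    import kafka.network.RequestChannel.Session
    import kafka.security.auth.{Authorizer, Operation, Resource}

    // With no authorizer configured every request is allowed; otherwise the
    // request is rejected unless the pluggable authorizer approves it.
    def ensureAuthorized(authorizer: Option[Authorizer], session: Session,
                         operation: Operation, resource: Resource): Unit = {
      if (authorizer.isDefined && !authorizer.get.authorize(session, operation, resource))
        throw new AuthorizationException("Request is not authorized.")
    }

Under that pattern, this change checks CREATE on the cluster resource only when the offsets topic is missing and would be auto-created, instead of on every consumer metadata request.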
--- core/src/main/scala/kafka/server/KafkaApis.scala | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/core/src/main/scala/kafka/server/KafkaApis.scala b/core/src/main/scala/kafka/server/KafkaApis.scala index 914f1b5..87f1569 100644 --- a/core/src/main/scala/kafka/server/KafkaApis.scala +++ b/core/src/main/scala/kafka/server/KafkaApis.scala @@ -620,9 +620,11 @@ class KafkaApis(val requestChannel: RequestChannel, throw new AuthorizationException("Request " + consumerMetadataRequest + " is not authorized to read from consumer group" + consumerMetadataRequest.group ) } - if(metadataCache.getTopicMetadata(Set(ConsumerCoordinator.OffsetsTopicName), request.securityProtocol).isEmpty && - authorizer.isDefined && !authorizer.get.authorize(request.session, Operation.CREATE, Resource.ClusterResource)) { - throw new AuthorizationException("Request " + consumerMetadataRequest + " is not authorized to create " + ConsumerCoordinator.OffsetsTopicName) + val topicResponses = metadataCache.getTopicMetadata(Set(ConsumerCoordinator.OffsetsTopicName), request.securityProtocol) + if(topicResponses.isEmpty) { + if (authorizer.isDefined && !authorizer.get.authorize(request.session, Operation.CREATE, Resource.ClusterResource)) { + throw new AuthorizationException("Request " + consumerMetadataRequest + " is not authorized to create " + ConsumerCoordinator.OffsetsTopicName) + } } //get metadata (and create the topic if necessary) -- 2.1.3.36.g8e36a6d From 32e2f2ec5cba2399f695124e92fd54b8c6d674d3 Mon Sep 17 00:00:00 2001 From: Parth Brahmbhatt Date: Thu, 9 Jul 2015 17:37:16 -0700 Subject: [PATCH 4/8] Addressing some more comments. --- .../src/main/scala/kafka/common/ErrorMapping.scala | 2 +- core/src/main/scala/kafka/security/auth/Acl.scala | 4 +- .../scala/kafka/security/auth/KafkaPrincipal.scala | 2 +- .../main/scala/kafka/security/auth/Operation.java | 45 ------------------ .../main/scala/kafka/security/auth/Operation.scala | 50 ++++++++++++++++++++ .../main/scala/kafka/security/auth/Resource.scala | 4 +- .../scala/kafka/security/auth/ResourceType.java | 40 ---------------- .../scala/kafka/security/auth/ResourceType.scala | 55 ++++++++++++++++++++++ core/src/main/scala/kafka/server/KafkaApis.scala | 38 +++++++-------- .../scala/unit/kafka/security/auth/AclTest.scala | 29 ++++++------ .../kafka/security/auth/KafkaPrincipalTest.scala | 9 ++++ .../unit/kafka/security/auth/ResourceTest.scala | 16 +++---- 12 files changed, 162 insertions(+), 132 deletions(-) delete mode 100644 core/src/main/scala/kafka/security/auth/Operation.java create mode 100644 core/src/main/scala/kafka/security/auth/Operation.scala delete mode 100644 core/src/main/scala/kafka/security/auth/ResourceType.java create mode 100644 core/src/main/scala/kafka/security/auth/ResourceType.scala diff --git a/core/src/main/scala/kafka/common/ErrorMapping.scala b/core/src/main/scala/kafka/common/ErrorMapping.scala index 5ad3ae5..fae4d63 100644 --- a/core/src/main/scala/kafka/common/ErrorMapping.scala +++ b/core/src/main/scala/kafka/common/ErrorMapping.scala @@ -53,7 +53,7 @@ object ErrorMapping { val NotEnoughReplicasAfterAppendCode: Short = 20 // 21: InvalidRequiredAcks // 22: IllegalConsumerGeneration - val AuthorizationCode: Short = 24; + val AuthorizationCode: Short = 23; private val exceptionToCode = Map[Class[Throwable], Short]( diff --git a/core/src/main/scala/kafka/security/auth/Acl.scala b/core/src/main/scala/kafka/security/auth/Acl.scala index 809c6e3..b77c145 100644 --- a/core/src/main/scala/kafka/security/auth/Acl.scala +++ 
b/core/src/main/scala/kafka/security/auth/Acl.scala @@ -22,7 +22,7 @@ import kafka.utils.Json object Acl { val wildCardPrincipal: KafkaPrincipal = new KafkaPrincipal("user", "*") val wildCardHost: String = "*" - val allowAllAcl = new Acl(Set[KafkaPrincipal](wildCardPrincipal), PermissionType.ALLOW, Set[String](wildCardHost), Set[Operation](Operation.ALL)) + val allowAllAcl = new Acl(Set[KafkaPrincipal](wildCardPrincipal), PermissionType.ALLOW, Set[String](wildCardHost), Set[Operation](All)) val principalKey = "principals" val permissionTypeKey = "permissionType" val operationKey = "operations" @@ -110,7 +110,7 @@ class Acl(val principals: Set[KafkaPrincipal],val permissionType: PermissionType val map: collection.mutable.HashMap[String, Any] = new collection.mutable.HashMap[String, Any]() map.put(Acl.principalKey, principals.map(principal => principal.toString)) map.put(Acl.permissionTypeKey, permissionType.name()) - map.put(Acl.operationKey, operations.map(operation => operation.name())) + map.put(Acl.operationKey, operations.map(operation => operation.name)) map.put(Acl.hostsKey, hosts) map.toMap diff --git a/core/src/main/scala/kafka/security/auth/KafkaPrincipal.scala b/core/src/main/scala/kafka/security/auth/KafkaPrincipal.scala index 246d940..fdb4e4c 100644 --- a/core/src/main/scala/kafka/security/auth/KafkaPrincipal.scala +++ b/core/src/main/scala/kafka/security/auth/KafkaPrincipal.scala @@ -23,7 +23,7 @@ object KafkaPrincipal { val UserType: String = "User" def fromString(str: String) : KafkaPrincipal = { - val arr: Array[String] = str.split(Seperator) + val arr: Array[String] = str.split(Seperator, 2) //only split in two parts if(arr.length != 2) { throw new IllegalArgumentException("expected a string in format principalType:principalName but got " + str) diff --git a/core/src/main/scala/kafka/security/auth/Operation.java b/core/src/main/scala/kafka/security/auth/Operation.java deleted file mode 100644 index 517c5e0..0000000 --- a/core/src/main/scala/kafka/security/auth/Operation.java +++ /dev/null @@ -1,45 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package kafka.security.auth; - -/** - * Different operations a client may perform on kafka resources. - */ -public enum Operation { - READ, - WRITE, - CREATE, - DELETE, - ALTER, - DESCRIBE, - CLUSTER_ACTION, - ALL; - - /** - * method defined for case insensitive check. 
the default valueOf() method is case sensitive
-     */
-    public static Operation fromString(String operationName) {
-        if(operationName != null) {
-            for(Operation operation: Operation.values()) {
-                if(operationName.equalsIgnoreCase(operation.name())) {
-                    return operation;
-                }
-            }
-        }
-        throw new IllegalArgumentException("no matching enum value found for " + operationName);
-    }
-}
diff --git a/core/src/main/scala/kafka/security/auth/Operation.scala b/core/src/main/scala/kafka/security/auth/Operation.scala
new file mode 100644
index 0000000..1cfdc01
--- /dev/null
+++ b/core/src/main/scala/kafka/security/auth/Operation.scala
@@ -0,0 +1,50 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package kafka.security.auth
+
+/**
+ * Different operations a client may perform on kafka resources.
+ */
+
+sealed trait Operation { def name: String }
+case object Read extends Operation { val name: String = "Read" }
+case object Write extends Operation { val name: String = "Write" }
+case object Create extends Operation { val name: String = "Create" }
+case object Delete extends Operation { val name: String = "Delete" }
+case object Alter extends Operation { val name: String = "Alter" }
+case object Describe extends Operation { val name: String = "Describe" }
+case object ClusterAction extends Operation { val name: String = "ClusterAction" }
+case object All extends Operation { val name: String = "All" }
+
+object Operation {
+  def fromString(operation: String) : Operation = {
+    operation match {
+      case operation if operation.equalsIgnoreCase(Read.name) => Read
+      case operation if operation.equalsIgnoreCase(Write.name) => Write
+      case operation if operation.equalsIgnoreCase(Create.name) => Create
+      case operation if operation.equalsIgnoreCase(Delete.name) => Delete
+      case operation if operation.equalsIgnoreCase(Alter.name) => Alter
+      case operation if operation.equalsIgnoreCase(Describe.name) => Describe
+      case operation if operation.equalsIgnoreCase(ClusterAction.name) => ClusterAction
+      case operation if operation.equalsIgnoreCase(All.name) => All
+      //fail with an explicit error instead of a MatchError on unknown input
+      case _ => throw new IllegalArgumentException("no matching enum value found for " + operation)
+    }
+  }
+
+  def values() : List[Operation] = {
+    List(Read, Write, Create, Delete, Alter, Describe, ClusterAction, All)
+  }
+}
diff --git a/core/src/main/scala/kafka/security/auth/Resource.scala b/core/src/main/scala/kafka/security/auth/Resource.scala
index df81385..d945b36 100644
--- a/core/src/main/scala/kafka/security/auth/Resource.scala
+++ b/core/src/main/scala/kafka/security/auth/Resource.scala
@@ -19,7 +19,7 @@ package kafka.security.auth
 object Resource {
   val Separator: String = ":"
   val ClusterResourceName: String = "kafka-cluster"
-  val ClusterResource: Resource = new Resource(ResourceType.CLUSTER,Resource.ClusterResourceName)
+  val ClusterResource: Resource = new
Resource(Cluster,Resource.ClusterResourceName) def fromString(str: String) : Resource = { val arr: Array[String] = str.split(Separator) @@ -41,7 +41,7 @@ object Resource { class Resource(val resourceType: ResourceType,val name: String) { override def toString: String = { - resourceType.name() + Resource.Separator + name + resourceType.name + Resource.Separator + name } override def equals(that: Any): Boolean = { diff --git a/core/src/main/scala/kafka/security/auth/ResourceType.java b/core/src/main/scala/kafka/security/auth/ResourceType.java deleted file mode 100644 index 70ed1a6..0000000 --- a/core/src/main/scala/kafka/security/auth/ResourceType.java +++ /dev/null @@ -1,40 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package kafka.security.auth; - -/** - * ResourceTypes. - */ -public enum ResourceType { - CLUSTER, - TOPIC, - CONSUMER_GROUP; - - /** - * method defined for case insensitive check. the default valueOf() method is case sensitive - */ - public static ResourceType fromString(String resourceType) { - if(resourceType != null) { - for(ResourceType rType: ResourceType.values()) { - if(resourceType.equalsIgnoreCase(rType.name())) { - return rType; - } - } - } - throw new IllegalArgumentException("no matching enum value found for " + resourceType); - } -} diff --git a/core/src/main/scala/kafka/security/auth/ResourceType.scala b/core/src/main/scala/kafka/security/auth/ResourceType.scala new file mode 100644 index 0000000..db19d16 --- /dev/null +++ b/core/src/main/scala/kafka/security/auth/ResourceType.scala @@ -0,0 +1,55 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package kafka.security.auth
+
+/**
+ * ResourceTypes.
+ */
+
+sealed trait ResourceType {
+  def name: String
+}
+
+case object Cluster extends ResourceType {
+  val name: String = "Cluster"
+}
+
+case object Topic extends ResourceType {
+  val name: String = "Topic"
+}
+
+case object ConsumerGroup extends ResourceType {
+  val name: String = "ConsumerGroup"
+}
+
+object ResourceType {
+  def fromString(resourceType: String) : ResourceType = {
+    resourceType match {
+      case resourceType if resourceType.equalsIgnoreCase(Cluster.name) => Cluster
+      case resourceType if resourceType.equalsIgnoreCase(Topic.name) => Topic
+      case resourceType if resourceType.equalsIgnoreCase(ConsumerGroup.name) => ConsumerGroup
+      //fail with an explicit error instead of a MatchError on unknown input
+      case _ => throw new IllegalArgumentException("no matching enum value found for " + resourceType)
+    }
+  }
+
+  def values() : List[ResourceType] = {
+    List(Cluster, Topic, ConsumerGroup)
+  }
+}
\ No newline at end of file
diff --git a/core/src/main/scala/kafka/server/KafkaApis.scala b/core/src/main/scala/kafka/server/KafkaApis.scala
index 87f1569..7a5a8a3 100644
--- a/core/src/main/scala/kafka/server/KafkaApis.scala
+++ b/core/src/main/scala/kafka/server/KafkaApis.scala
@@ -26,7 +26,7 @@ import kafka.log._
 import kafka.message.MessageSet
 import kafka.network.RequestChannel.Response
 import kafka.network._
-import kafka.security.auth.{Authorizer, Operation, Resource, ResourceType}
+import kafka.security.auth.{Authorizer, Read, Write, Create, ClusterAction, Describe, Resource, Topic, Cluster, ConsumerGroup}
 import kafka.utils.{Logging, SystemTime, ZKGroupTopicDirs, ZkUtils}
 import org.I0Itec.zkclient.ZkClient
 import org.apache.kafka.common.TopicPartition
@@ -98,7 +98,7 @@ class KafkaApis(val requestChannel: RequestChannel,
     // stop serving data to clients for the topic being deleted
     val leaderAndIsrRequest = request.requestObj.asInstanceOf[LeaderAndIsrRequest]
 
-    if(authorizer.isDefined && !authorizer.get.authorize(request.session, Operation.CLUSTER_ACTION, Resource.ClusterResource)) {
+    if(authorizer.isDefined && !authorizer.get.authorize(request.session, ClusterAction, Resource.ClusterResource)) {
       throw new AuthorizationException("Request " + request + " is not authorized.")
     }
 
@@ -134,7 +134,7 @@ class KafkaApis(val requestChannel: RequestChannel,
     // stop serving data to clients for the topic being deleted
     val stopReplicaRequest = request.requestObj.asInstanceOf[StopReplicaRequest]
 
-    if(authorizer.isDefined && !authorizer.get.authorize(request.session, Operation.CLUSTER_ACTION, Resource.ClusterResource)) {
+    if(authorizer.isDefined && !authorizer.get.authorize(request.session, ClusterAction, Resource.ClusterResource)) {
       throw new AuthorizationException("Request " + request + " is not authorized.")
     }
 
@@ -147,7 +147,7 @@ class KafkaApis(val requestChannel: RequestChannel,
   def handleUpdateMetadataRequest(request: RequestChannel.Request) {
     val updateMetadataRequest = request.requestObj.asInstanceOf[UpdateMetadataRequest]
 
-    if(authorizer.isDefined && !authorizer.get.authorize(request.session, Operation.CLUSTER_ACTION, Resource.ClusterResource)) {
+    if(authorizer.isDefined && !authorizer.get.authorize(request.session, ClusterAction, Resource.ClusterResource)) {
       throw new AuthorizationException("Request " + request + " is not authorized.")
     }
 
@@ -163,7
+163,7 @@ class KafkaApis(val requestChannel: RequestChannel, // stop serving data to clients for the topic being deleted val controlledShutdownRequest = request.requestObj.asInstanceOf[ControlledShutdownRequest] - if(authorizer.isDefined && !authorizer.get.authorize(request.session, Operation.CLUSTER_ACTION, Resource.ClusterResource)) { + if(authorizer.isDefined && !authorizer.get.authorize(request.session, ClusterAction, Resource.ClusterResource)) { throw new AuthorizationException("Request " + request + " is not authorized.") } @@ -187,8 +187,8 @@ class KafkaApis(val requestChannel: RequestChannel, val filteredRequestInfo = (offsetCommitRequest.requestInfo -- invalidRequestsInfo.keys) val (authorizedRequestInfo, unauthorizedRequestInfo) = filteredRequestInfo.partition( - mapEntry => !authorizer.isDefined || (authorizer.get.authorize(request.session, Operation.READ, new Resource(ResourceType.TOPIC,mapEntry._1.topic)) && - authorizer.get.authorize(request.session, Operation.READ, new Resource(ResourceType.CONSUMER_GROUP,offsetCommitRequest.groupId)))) + mapEntry => !authorizer.isDefined || (authorizer.get.authorize(request.session, Read, new Resource(Topic, mapEntry._1.topic)) && + authorizer.get.authorize(request.session, Read, new Resource(ConsumerGroup, offsetCommitRequest.groupId)))) // the callback for sending an offset commit response def sendResponseCallback(commitStatus: immutable.Map[TopicAndPartition, Short]) { @@ -281,7 +281,7 @@ class KafkaApis(val requestChannel: RequestChannel, val produceRequest = request.requestObj.asInstanceOf[ProducerRequest] val (authorizedRequestInfo, unauthorizedRequestInfo) = produceRequest.data.partition { - case (topicAndPartition, _) => !authorizer.isDefined || authorizer.get.authorize(request.session, Operation.WRITE, new Resource(ResourceType.TOPIC,topicAndPartition.topic)) + case (topicAndPartition, _) => !authorizer.isDefined || authorizer.get.authorize(request.session, Write, new Resource(Topic ,topicAndPartition.topic)) } // the callback for sending a produce response @@ -342,7 +342,7 @@ class KafkaApis(val requestChannel: RequestChannel, val fetchRequest = request.requestObj.asInstanceOf[FetchRequest] val (authorizedRequestInfo, unauthorizedRequestInfo) = fetchRequest.requestInfo.partition { - case (topicAndPartition, _) => !authorizer.isDefined || authorizer.get.authorize(request.session, Operation.READ, new Resource(ResourceType.TOPIC, topicAndPartition.topic)) + case (topicAndPartition, _) => !authorizer.isDefined || authorizer.get.authorize(request.session, Read, new Resource(Topic, topicAndPartition.topic)) } val unauthorizedResponseStatus = unauthorizedRequestInfo.mapValues(_ => FetchResponsePartitionData(ErrorMapping.AuthorizationCode, -1, MessageSet.Empty)) @@ -385,7 +385,7 @@ class KafkaApis(val requestChannel: RequestChannel, val offsetRequest = request.requestObj.asInstanceOf[OffsetRequest] val (authorizedRequestInfo, unauthorizedRequestInfo) = offsetRequest.requestInfo.partition { - case (topicAndPartition, _) => !authorizer.isDefined || authorizer.get.authorize(request.session, Operation.DESCRIBE, new Resource(ResourceType.TOPIC, topicAndPartition.topic)) + case (topicAndPartition, _) => !authorizer.isDefined || authorizer.get.authorize(request.session, Describe, new Resource(Topic, topicAndPartition.topic)) } val unauthorizedResponseStatus = unauthorizedRequestInfo.mapValues(_ => PartitionOffsetsResponse(ErrorMapping.AuthorizationCode, Nil)) @@ -534,12 +534,12 @@ class KafkaApis(val requestChannel: RequestChannel, val 
metadataRequest = request.requestObj.asInstanceOf[TopicMetadataRequest] val topics = metadataRequest.topics.toSet - var (authorizedTopics, unauthorizedTopics) = topics.partition(topic => !authorizer.isDefined || authorizer.get.authorize(request.session, Operation.DESCRIBE, new Resource(ResourceType.TOPIC,topic))) + var (authorizedTopics, unauthorizedTopics) = topics.partition(topic => !authorizer.isDefined || authorizer.get.authorize(request.session, Describe, new Resource(Topic, topic))) val topicResponses = metadataCache.getTopicMetadata(authorizedTopics, request.securityProtocol) if(config.autoCreateTopicsEnable && topicResponses.size != authorizedTopics.size) { val nonExistentTopics: Set[String] = topics -- topicResponses.map(_.topic).toSet - if (authorizer.isDefined && !authorizer.get.authorize(request.session, Operation.CREATE, Resource.ClusterResource)) { + if (authorizer.isDefined && !authorizer.get.authorize(request.session, Create, Resource.ClusterResource)) { authorizedTopics --= nonExistentTopics unauthorizedTopics ++= nonExistentTopics } @@ -562,8 +562,8 @@ class KafkaApis(val requestChannel: RequestChannel, val offsetFetchRequest = request.requestObj.asInstanceOf[OffsetFetchRequest] val (authorizedTopicPartitions, unauthorizedTopicPartitions) = offsetFetchRequest.requestInfo.partition( - topicAndPartition => !authorizer.isDefined || (authorizer.get.authorize(request.session, Operation.DESCRIBE, new Resource(ResourceType.TOPIC, topicAndPartition.topic)) - && authorizer.get.authorize(request.session, Operation.READ, new Resource(ResourceType.CONSUMER_GROUP, offsetFetchRequest.groupId)))) + topicAndPartition => !authorizer.isDefined || (authorizer.get.authorize(request.session, Describe, new Resource(Topic, topicAndPartition.topic)) + && authorizer.get.authorize(request.session, Read, new Resource(ConsumerGroup, offsetFetchRequest.groupId)))) val authorizationError = OffsetMetadataAndError(OffsetMetadata.InvalidOffsetMetadata, ErrorMapping.AuthorizationCode) val unauthorizedStatus = unauthorizedTopicPartitions.map(topicAndPartition => (topicAndPartition, authorizationError)).toMap @@ -616,13 +616,13 @@ class KafkaApis(val requestChannel: RequestChannel, val partition = coordinator.partitionFor(consumerMetadataRequest.group) - if (authorizer.isDefined && !authorizer.get.authorize(request.session, Operation.READ, new Resource(ResourceType.CONSUMER_GROUP, consumerMetadataRequest.group))) { + if (authorizer.isDefined && !authorizer.get.authorize(request.session, Read, new Resource(ConsumerGroup, consumerMetadataRequest.group))) { throw new AuthorizationException("Request " + consumerMetadataRequest + " is not authorized to read from consumer group" + consumerMetadataRequest.group ) } val topicResponses = metadataCache.getTopicMetadata(Set(ConsumerCoordinator.OffsetsTopicName), request.securityProtocol) if(topicResponses.isEmpty) { - if (authorizer.isDefined && !authorizer.get.authorize(request.session, Operation.CREATE, Resource.ClusterResource)) { + if (authorizer.isDefined && !authorizer.get.authorize(request.session, Create, Resource.ClusterResource)) { throw new AuthorizationException("Request " + consumerMetadataRequest + " is not authorized to create " + ConsumerCoordinator.OffsetsTopicName) } } @@ -651,8 +651,8 @@ class KafkaApis(val requestChannel: RequestChannel, val respHeader = new ResponseHeader(request.header.correlationId) val (authorizedTopics, unauthorizedTopics) = joinGroupRequest.topics().partition( - topic => (!authorizer.isDefined || 
authorizer.get.authorize(request.session, Operation.READ, new Resource(ResourceType.TOPIC, topic))
-      && authorizer.get.authorize(request.session, Operation.READ, new Resource(ResourceType.CONSUMER_GROUP, joinGroupRequest.groupId()))))
+      topic => (!authorizer.isDefined || authorizer.get.authorize(request.session, Read, new Resource(Topic, topic))
+      && authorizer.get.authorize(request.session, Read, new Resource(ConsumerGroup, joinGroupRequest.groupId()))))
 
     val unauthorizedTopicPartition = unauthorizedTopics.map(topic => new TopicPartition(topic, -1))
 
@@ -680,7 +680,7 @@ class KafkaApis(val requestChannel: RequestChannel,
     val heartbeatRequest = request.body.asInstanceOf[HeartbeatRequest]
     val respHeader = new ResponseHeader(request.header.correlationId)
 
-    if (authorizer.isDefined && !authorizer.get.authorize(request.session, Operation.READ, new Resource(ResourceType.CONSUMER_GROUP, heartbeatRequest.groupId()))) {
+    if (authorizer.isDefined && !authorizer.get.authorize(request.session, Read, new Resource(ConsumerGroup, heartbeatRequest.groupId()))) {
       val heartbeatResponse = new HeartbeatResponse(ErrorMapping.AuthorizationCode)
       requestChannel.sendResponse(new Response(request, new ResponseSend(request.connectionId, respHeader, heartbeatResponse)))
       return
diff --git a/core/src/test/scala/unit/kafka/security/auth/AclTest.scala b/core/src/test/scala/unit/kafka/security/auth/AclTest.scala
index 9bb504a..5a460b4 100644
--- a/core/src/test/scala/unit/kafka/security/auth/AclTest.scala
+++ b/core/src/test/scala/unit/kafka/security/auth/AclTest.scala
@@ -16,17 +16,18 @@
  */
 package unit.kafka.security.auth
 
-import kafka.security.auth.{Acl, KafkaPrincipal, Operation, PermissionType}
+import kafka.security.auth.{Acl, KafkaPrincipal, Operation, Read, Write, Describe, PermissionType}
 import kafka.utils.Json
 import org.junit.Assert
 import org.scalatest.junit.JUnit3Suite
 
 class AclTest extends JUnit3Suite {
+
   def testAclJsonConversion(): Unit = {
-    val acl1: Acl = new Acl(Set(new KafkaPrincipal(KafkaPrincipal.UserType, "alice"), new KafkaPrincipal(KafkaPrincipal.UserType, "bob")), PermissionType.DENY, Set[String]("host1","host2"), Set[Operation](Operation.READ, Operation.WRITE))
-    val acl2: Acl = new Acl(Set(new KafkaPrincipal(KafkaPrincipal.UserType, "bob")), PermissionType.ALLOW, Set[String]("*"), Set[Operation](Operation.READ, Operation.WRITE))
-    val acl3: Acl = new Acl(Set(new KafkaPrincipal(KafkaPrincipal.UserType, "bob")), PermissionType.DENY, Set[String]("host1","host2"), Set[Operation](Operation.READ))
+    val acl1: Acl = new Acl(Set(new KafkaPrincipal(KafkaPrincipal.UserType, "alice"), new KafkaPrincipal(KafkaPrincipal.UserType, "bob")), PermissionType.DENY, Set[String]("host1","host2"), Set[Operation](Read, Write))
+    val acl2: Acl = new Acl(Set(new KafkaPrincipal(KafkaPrincipal.UserType, "bob")), PermissionType.ALLOW, Set[String]("*"), Set[Operation](Read, Write))
+    val acl3: Acl = new Acl(Set(new KafkaPrincipal(KafkaPrincipal.UserType, "bob")), PermissionType.DENY, Set[String]("host1","host2"), Set[Operation](Read))
 
     val acls: Set[Acl] = Set[Acl](acl1, acl2, acl3)
     val jsonAcls: String = Json.encode(Acl.toJsonCompatibleMap(acls))
@@ -41,30 +42,30 @@ class AclTest extends JUnit3Suite {
 
   def testEqualsAndHashCode(): Unit = {
     //check equals is not sensitive to case or order for principal, hosts or operations.
-    val acl1: Acl = new Acl(Set(new KafkaPrincipal(KafkaPrincipal.UserType, "bob"), new KafkaPrincipal(KafkaPrincipal.UserType, "alice")), PermissionType.ALLOW, Set[String]("host1", "host2"), Set[Operation](Operation.READ, Operation.WRITE))
-    val acl2: Acl = new Acl(Set(new KafkaPrincipal("USER", "ALICE"), new KafkaPrincipal(KafkaPrincipal.UserType, "bob")), PermissionType.ALLOW, Set[String]("HOST2", "HOST1"), Set[Operation](Operation.WRITE, Operation.READ))
+    val acl1: Acl = new Acl(Set(new KafkaPrincipal(KafkaPrincipal.UserType, "bob"), new KafkaPrincipal(KafkaPrincipal.UserType, "alice")), PermissionType.ALLOW, Set[String]("host1", "host2"), Set[Operation](Read, Write))
+    val acl2: Acl = new Acl(Set(new KafkaPrincipal("USER", "ALICE"), new KafkaPrincipal(KafkaPrincipal.UserType, "bob")), PermissionType.ALLOW, Set[String]("HOST2", "HOST1"), Set[Operation](Write, Read))
 
     Assert.assertEquals(acl1, acl2)
     Assert.assertEquals(acl1.hashCode(), acl2.hashCode())
 
     //if user does not match returns false
-    val acl3: Acl = new Acl(Set(new KafkaPrincipal(KafkaPrincipal.UserType, "alice")), PermissionType.ALLOW, Set[String]("host1", "host2"), Set[Operation](Operation.READ, Operation.WRITE))
-    val acl4: Acl = new Acl(Set(new KafkaPrincipal("USER", "Bob")), PermissionType.ALLOW, Set[String]("HOST1","HOST2"), Set[Operation](Operation.READ, Operation.WRITE))
+    val acl3: Acl = new Acl(Set(new KafkaPrincipal(KafkaPrincipal.UserType, "alice")), PermissionType.ALLOW, Set[String]("host1", "host2"), Set[Operation](Read, Write))
+    val acl4: Acl = new Acl(Set(new KafkaPrincipal("USER", "Bob")), PermissionType.ALLOW, Set[String]("HOST1","HOST2"), Set[Operation](Read, Write))
     Assert.assertFalse(acl3.equals(acl4))
 
     //if permission does not match return false
-    val acl5: Acl = new Acl(Set(new KafkaPrincipal(KafkaPrincipal.UserType, "alice")), PermissionType.DENY, Set[String]("host1", "host2"), Set[Operation](Operation.READ, Operation.WRITE))
-    val acl6: Acl = new Acl(Set(new KafkaPrincipal(KafkaPrincipal.UserType, "alice")), PermissionType.ALLOW, Set[String]("HOST1","HOST2"), Set[Operation](Operation.READ, Operation.WRITE))
+    val acl5: Acl = new Acl(Set(new KafkaPrincipal(KafkaPrincipal.UserType, "alice")), PermissionType.DENY, Set[String]("host1", "host2"), Set[Operation](Read, Write))
+    val acl6: Acl = new Acl(Set(new KafkaPrincipal(KafkaPrincipal.UserType, "alice")), PermissionType.ALLOW, Set[String]("HOST1","HOST2"), Set[Operation](Read, Write))
     Assert.assertFalse(acl5.equals(acl6))
 
     //if hosts do not match return false
-    val acl7: Acl = new Acl(Set(new KafkaPrincipal(KafkaPrincipal.UserType, "alice")), PermissionType.ALLOW, Set[String]("host10", "HOST2"), Set[Operation](Operation.READ, Operation.WRITE))
-    val acl8: Acl = new Acl(Set(new KafkaPrincipal(KafkaPrincipal.UserType, "alice")), PermissionType.ALLOW, Set[String]("HOST1","HOST2"), Set[Operation](Operation.READ, Operation.WRITE))
+    val acl7: Acl = new Acl(Set(new KafkaPrincipal(KafkaPrincipal.UserType, "alice")), PermissionType.ALLOW, Set[String]("host10", "HOST2"), Set[Operation](Read, Write))
+    val acl8: Acl = new Acl(Set(new KafkaPrincipal(KafkaPrincipal.UserType, "alice")), PermissionType.ALLOW, Set[String]("HOST1","HOST2"), Set[Operation](Read, Write))
     Assert.assertFalse(acl7.equals(acl8))
 
     //if operations do not match return false
-    val acl9: Acl = new Acl(Set(new KafkaPrincipal(KafkaPrincipal.UserType, "bob")), PermissionType.ALLOW, Set[String]("host1", "host2"), Set[Operation](Operation.READ, Operation.WRITE))
-    val acl10: Acl = new Acl(Set(new
KafkaPrincipal(KafkaPrincipal.UserType, "bob")), PermissionType.ALLOW, Set[String]("HOST1","HOST2"), Set[Operation](Operation.READ, Operation.DESCRIBE)) + val acl9: Acl = new Acl(Set(new KafkaPrincipal(KafkaPrincipal.UserType, "bob")), PermissionType.ALLOW, Set[String]("host1", "host2"), Set[Operation](Read, Write)) + val acl10: Acl = new Acl(Set(new KafkaPrincipal(KafkaPrincipal.UserType, "bob")), PermissionType.ALLOW, Set[String]("HOST1","HOST2"), Set[Operation](Read, Describe)) Assert.assertFalse(acl9.equals(acl10)) } } diff --git a/core/src/test/scala/unit/kafka/security/auth/KafkaPrincipalTest.scala b/core/src/test/scala/unit/kafka/security/auth/KafkaPrincipalTest.scala index d6ecb06..65e595d 100644 --- a/core/src/test/scala/unit/kafka/security/auth/KafkaPrincipalTest.scala +++ b/core/src/test/scala/unit/kafka/security/auth/KafkaPrincipalTest.scala @@ -40,4 +40,13 @@ class KafkaPrincipalTest extends JUnit3Suite { val principal6:KafkaPrincipal = KafkaPrincipal.fromString("group:test") Assert.assertFalse(principal5.equals(principal6)) } + + def testPrincipalNameCanContainSeparator: Unit = { + val principalType: String = "user" + val name: String = "name:with:" + KafkaPrincipal.Seperator + ":in:it" + + val principal = KafkaPrincipal.fromString(principalType + KafkaPrincipal.Seperator + name) + Assert.assertEquals(principalType, principal.principalType) + Assert.assertEquals(name, principal.name) + } } diff --git a/core/src/test/scala/unit/kafka/security/auth/ResourceTest.scala b/core/src/test/scala/unit/kafka/security/auth/ResourceTest.scala index 0a21416..12a0494 100644 --- a/core/src/test/scala/unit/kafka/security/auth/ResourceTest.scala +++ b/core/src/test/scala/unit/kafka/security/auth/ResourceTest.scala @@ -16,8 +16,8 @@ */ package unit.kafka.security.auth -import kafka.security.auth.Resource -import kafka.security.auth.ResourceType +import kafka.security.auth.{ConsumerGroup, Resource, Topic} + import org.junit.Assert import org.scalatest.junit.JUnit3Suite @@ -25,19 +25,19 @@ class ResourceTest extends JUnit3Suite { def testEqualsAndHashCode(): Unit = { //check equals is not sensitive to case. 
- val resource1: Resource = Resource.fromString(ResourceType.TOPIC.name().toLowerCase + ":test") - val resource2: Resource = Resource.fromString(ResourceType.TOPIC.name().toUpperCase() + ":TEST") + val resource1: Resource = Resource.fromString(Topic.name.toLowerCase + ":test") + val resource2: Resource = Resource.fromString(Topic.name.toUpperCase() + ":TEST") Assert.assertEquals(resource1, resource2) Assert.assertEquals(resource1.hashCode(), resource2.hashCode()) - val resource3: Resource = Resource.fromString(ResourceType.TOPIC.name() + ":test") - val resource4: Resource = Resource.fromString(ResourceType.TOPIC.name() + ":TEST1") + val resource3: Resource = Resource.fromString(Topic.name + ":test") + val resource4: Resource = Resource.fromString(Topic.name + ":TEST1") //if name does not match returns false Assert.assertFalse(resource3.equals(resource4)) //if type does not match return false - val resource5: Resource = Resource.fromString(ResourceType.TOPIC.name() + ":test") - val resource6: Resource = Resource.fromString(ResourceType.CONSUMER_GROUP.name() + ":test") + val resource5: Resource = Resource.fromString(Topic.name + ":test") + val resource6: Resource = Resource.fromString(ConsumerGroup.name + ":test") Assert.assertFalse(resource5.equals(resource6)) } } -- 2.1.3.36.g8e36a6d From 245d640b41819114d527fd20ae6c3f1d5c3e42e6 Mon Sep 17 00:00:00 2001 From: Parth Brahmbhatt Date: Thu, 9 Jul 2015 17:47:01 -0700 Subject: [PATCH 5/8] Removing acl.json file --- core/src/test/resources/acl.json | 39 ---------------------- .../scala/unit/kafka/security/auth/AclTest.scala | 7 ++-- 2 files changed, 2 insertions(+), 44 deletions(-) delete mode 100644 core/src/test/resources/acl.json diff --git a/core/src/test/resources/acl.json b/core/src/test/resources/acl.json deleted file mode 100644 index ae7fbf8..0000000 --- a/core/src/test/resources/acl.json +++ /dev/null @@ -1,39 +0,0 @@ -{ - "version": 1, - "acls": [ - { - "hosts": [ - "host1", - "host2" - ], - "permissionType": "DENY", - "operations": [ - "READ", - "WRITE" - ], - "principals": ["user:alice", "user:bob"] - }, - { - "hosts": [ - "*" - ], - "permissionType": "ALLOW", - "operations": [ - "READ", - "WRITE" - ], - "principals": ["user:bob"] - }, - { - "hosts": [ - "host1", - "host2" - ], - "permissionType": "DENY", - "operations": [ - "read" - ], - "principals": ["user:bob"] - } - ] -} \ No newline at end of file diff --git a/core/src/test/scala/unit/kafka/security/auth/AclTest.scala b/core/src/test/scala/unit/kafka/security/auth/AclTest.scala index 5a460b4..4179d42 100644 --- a/core/src/test/scala/unit/kafka/security/auth/AclTest.scala +++ b/core/src/test/scala/unit/kafka/security/auth/AclTest.scala @@ -24,6 +24,7 @@ import org.scalatest.junit.JUnit3Suite class AclTest extends JUnit3Suite { + val aclJson = "{\"version\": 1, \"acls\": [{\"hosts\": [\"host1\",\"host2\"],\"permissionType\": \"DENY\",\"operations\": [\"READ\",\"WRITE\"],\"principals\": [\"user:alice\", \"user:bob\"] }, { \"hosts\": [ \"*\" ], \"permissionType\": \"ALLOW\", \"operations\": [ \"READ\", \"WRITE\" ], \"principals\": [\"user:bob\"] }, { \"hosts\": [ \"host1\", \"host2\" ], \"permissionType\": \"DENY\", \"operations\": [ \"read\" ], \"principals\": [\"user:bob\"] } ]}" def testAclJsonConversion(): Unit = { val acl1: Acl = new Acl(Set(new KafkaPrincipal(KafkaPrincipal.UserType, "alice"), new KafkaPrincipal(KafkaPrincipal.UserType, "bob")), PermissionType.DENY, Set[String]("host1","host2"), Set[Operation](Read, Write)) val acl2: Acl = new Acl(Set(new 
KafkaPrincipal(KafkaPrincipal.UserType, "bob")), PermissionType.ALLOW, Set[String]("*"), Set[Operation](Read, Write)) @@ -33,11 +34,7 @@ class AclTest extends JUnit3Suite { val jsonAcls: String = Json.encode(Acl.toJsonCompatibleMap(acls)) Assert.assertEquals(acls, Acl.fromJson(jsonAcls)) - //test json by reading from a local file. - val path: String = Thread.currentThread().getContextClassLoader.getResource("acl.json").getPath - val source = scala.io.Source.fromFile(path) - Assert.assertEquals(acls, Acl.fromJson(source.mkString)) - source.close() + Assert.assertEquals(acls, Acl.fromJson(aclJson)) } def testEqualsAndHashCode(): Unit = { -- 2.1.3.36.g8e36a6d From 600eba64e884dd6656943d9240dcd9d222699f9c Mon Sep 17 00:00:00 2001 From: Parth Brahmbhatt Date: Mon, 13 Jul 2015 12:47:41 -0700 Subject: [PATCH 6/8] Moving PermissionType to trait instead of enum. Following the convention for defining constants. --- core/src/main/scala/kafka/security/auth/Acl.scala | 44 +++++++++---------- .../scala/kafka/security/auth/KafkaPrincipal.scala | 6 +-- .../scala/kafka/security/auth/PermissionType.java | 22 ---------- .../scala/kafka/security/auth/PermissionType.scala | 50 ++++++++++++++++++++++ .../scala/unit/kafka/security/auth/AclTest.scala | 34 ++++++++------- .../kafka/security/auth/KafkaPrincipalTest.scala | 4 +- 6 files changed, 95 insertions(+), 65 deletions(-) delete mode 100644 core/src/main/scala/kafka/security/auth/PermissionType.java create mode 100644 core/src/main/scala/kafka/security/auth/PermissionType.scala diff --git a/core/src/main/scala/kafka/security/auth/Acl.scala b/core/src/main/scala/kafka/security/auth/Acl.scala index b77c145..3ef9522 100644 --- a/core/src/main/scala/kafka/security/auth/Acl.scala +++ b/core/src/main/scala/kafka/security/auth/Acl.scala @@ -20,16 +20,16 @@ package kafka.security.auth import kafka.utils.Json object Acl { - val wildCardPrincipal: KafkaPrincipal = new KafkaPrincipal("user", "*") - val wildCardHost: String = "*" - val allowAllAcl = new Acl(Set[KafkaPrincipal](wildCardPrincipal), PermissionType.ALLOW, Set[String](wildCardHost), Set[Operation](All)) - val principalKey = "principals" - val permissionTypeKey = "permissionType" - val operationKey = "operations" - val hostsKey = "hosts" - val versionKey = "version" - val currentVersion = 1 - val aclsKey = "acls" + val WildCardPrincipal: KafkaPrincipal = new KafkaPrincipal("user", "*") + val WildCardHost: String = "*" + val AllowAllAcl = new Acl(Set[KafkaPrincipal](WildCardPrincipal), Allow, Set[String](WildCardHost), Set[Operation](All)) + val PrincipalKey = "principals" + val PermissionTypeKey = "permissionType" + val OperationKey = "operations" + val HostsKey = "hosts" + val VersionKey = "version" + val CurrentVersion = 1 + val AclsKey = "acls" /** * @@ -66,13 +66,13 @@ object Acl { case Some(m) => val aclMap = m.asInstanceOf[Map[String, Any]] //the acl json version. 
- require(aclMap(versionKey) == currentVersion) - val aclSet: List[Map[String, Any]] = aclMap.get(aclsKey).get.asInstanceOf[List[Map[String, Any]]] + require(aclMap(VersionKey) == CurrentVersion) + val aclSet: List[Map[String, Any]] = aclMap.get(AclsKey).get.asInstanceOf[List[Map[String, Any]]] aclSet.foreach(item => { - val principals: List[KafkaPrincipal] = item(principalKey).asInstanceOf[List[String]].map(principal => KafkaPrincipal.fromString(principal)) - val permissionType: PermissionType = PermissionType.valueOf(item(permissionTypeKey).asInstanceOf[String]) - val operations: List[Operation] = item(operationKey).asInstanceOf[List[String]].map(operation => Operation.fromString(operation)) - val hosts: List[String] = item(hostsKey).asInstanceOf[List[String]] + val principals: List[KafkaPrincipal] = item(PrincipalKey).asInstanceOf[List[String]].map(principal => KafkaPrincipal.fromString(principal)) + val permissionType: PermissionType = PermissionType.fromString(item(PermissionTypeKey).asInstanceOf[String]) + val operations: List[Operation] = item(OperationKey).asInstanceOf[List[String]].map(operation => Operation.fromString(operation)) + val hosts: List[String] = item(HostsKey).asInstanceOf[List[String]] acls += new Acl(principals.toSet, permissionType, hosts.toSet, operations.toSet) }) case None => @@ -82,7 +82,7 @@ object Acl { def toJsonCompatibleMap(acls: Set[Acl]): Map[String,Any] = { acls match { - case aclSet: Set[Acl] => Map(Acl.versionKey -> Acl.currentVersion, Acl.aclsKey -> aclSet.map(acl => acl.toMap).toList) + case aclSet: Set[Acl] => Map(Acl.VersionKey -> Acl.CurrentVersion, Acl.AclsKey -> aclSet.map(acl => acl.toMap).toList) case _ => null } } @@ -108,10 +108,10 @@ class Acl(val principals: Set[KafkaPrincipal],val permissionType: PermissionType */ def toMap() : Map[String, Any] = { val map: collection.mutable.HashMap[String, Any] = new collection.mutable.HashMap[String, Any]() - map.put(Acl.principalKey, principals.map(principal => principal.toString)) - map.put(Acl.permissionTypeKey, permissionType.name()) - map.put(Acl.operationKey, operations.map(operation => operation.name)) - map.put(Acl.hostsKey, hosts) + map.put(Acl.PrincipalKey, principals.map(principal => principal.toString)) + map.put(Acl.PermissionTypeKey, permissionType.name) + map.put(Acl.OperationKey, operations.map(operation => operation.name)) + map.put(Acl.HostsKey, hosts) map.toMap } @@ -136,7 +136,7 @@ class Acl(val principals: Set[KafkaPrincipal],val permissionType: PermissionType } override def toString() : String = { - return "%s has %s permission for operations: %s from hosts: %s".format(principals.mkString(","), permissionType.name(), operations.mkString(","), hosts.mkString(",")) + return "%s has %s permission for operations: %s from hosts: %s".format(principals.mkString(","), permissionType.name, operations.mkString(","), hosts.mkString(",")) } } diff --git a/core/src/main/scala/kafka/security/auth/KafkaPrincipal.scala b/core/src/main/scala/kafka/security/auth/KafkaPrincipal.scala index fdb4e4c..bcdbf0e 100644 --- a/core/src/main/scala/kafka/security/auth/KafkaPrincipal.scala +++ b/core/src/main/scala/kafka/security/auth/KafkaPrincipal.scala @@ -19,11 +19,11 @@ package kafka.security.auth import java.security.Principal object KafkaPrincipal { - val Seperator: String = ":" + val Separator: String = ":" val UserType: String = "User" def fromString(str: String) : KafkaPrincipal = { - val arr: Array[String] = str.split(Seperator, 2) //only split in two parts + val arr: Array[String] = 
str.split(Separator, 2) //only split in two parts if(arr.length != 2) { throw new IllegalArgumentException("expected a string in format principalType:principalName but got " + str) @@ -48,7 +48,7 @@ class KafkaPrincipal(val principalType: String,val name: String) extends Princip } override def toString: String = { - principalType + KafkaPrincipal.Seperator + name + principalType + KafkaPrincipal.Separator + name } override def equals(that: Any): Boolean = { diff --git a/core/src/main/scala/kafka/security/auth/PermissionType.java b/core/src/main/scala/kafka/security/auth/PermissionType.java deleted file mode 100644 index b844d41..0000000 --- a/core/src/main/scala/kafka/security/auth/PermissionType.java +++ /dev/null @@ -1,22 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package kafka.security.auth; - -public enum PermissionType { - ALLOW, - DENY -} diff --git a/core/src/main/scala/kafka/security/auth/PermissionType.scala b/core/src/main/scala/kafka/security/auth/PermissionType.scala new file mode 100644 index 0000000..e3a2e97 --- /dev/null +++ b/core/src/main/scala/kafka/security/auth/PermissionType.scala @@ -0,0 +1,50 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package kafka.security.auth + +; + +/** + * PermissionType. + */ + + +sealed trait PermissionType { + def name: String +} + +case object Allow extends PermissionType { + val name: String = "Allow" +} + +case object Deny extends PermissionType { + val name: String = "Deny" +} + +object PermissionType { + def fromString(permissionType: String) : PermissionType = { + return permissionType match { + case permissionType if permissionType.equalsIgnoreCase(Allow.name) => Allow + case permissionType if permissionType.equalsIgnoreCase(Deny.name) => Deny + } + } + + def values() : List[PermissionType] = { + return List(Allow, Deny) + } +} + diff --git a/core/src/test/scala/unit/kafka/security/auth/AclTest.scala b/core/src/test/scala/unit/kafka/security/auth/AclTest.scala index 4179d42..699c37f 100644 --- a/core/src/test/scala/unit/kafka/security/auth/AclTest.scala +++ b/core/src/test/scala/unit/kafka/security/auth/AclTest.scala @@ -16,53 +16,55 @@ */ package unit.kafka.security.auth -import kafka.security.auth.{Acl, KafkaPrincipal, Operation, Read, Write, Describe, PermissionType} +import kafka.security.auth._ import kafka.utils.Json import org.junit.Assert import org.scalatest.junit.JUnit3Suite class AclTest extends JUnit3Suite { + val AclJson = "{\"version\": 1, \"acls\": [{\"hosts\": [\"host1\",\"host2\"],\"permissionType\": \"DENY\",\"operations\": [\"READ\",\"WRITE\"],\"principals\": [\"user:alice\", \"user:bob\"] }, " + + "{ \"hosts\": [ \"*\" ], \"permissionType\": \"ALLOW\", \"operations\": [ \"READ\", \"WRITE\" ], \"principals\": [\"user:bob\"] }, " + + "{ \"hosts\": [ \"host1\", \"host2\" ], \"permissionType\": \"DENY\", \"operations\": [ \"read\" ], \"principals\": [\"user:bob\"] } ]}" - val aclJson = "{\"version\": 1, \"acls\": [{\"hosts\": [\"host1\",\"host2\"],\"permissionType\": \"DENY\",\"operations\": [\"READ\",\"WRITE\"],\"principals\": [\"user:alice\", \"user:bob\"] }, { \"hosts\": [ \"*\" ], \"permissionType\": \"ALLOW\", \"operations\": [ \"READ\", \"WRITE\" ], \"principals\": [\"user:bob\"] }, { \"hosts\": [ \"host1\", \"host2\" ], \"permissionType\": \"DENY\", \"operations\": [ \"read\" ], \"principals\": [\"user:bob\"] } ]}" def testAclJsonConversion(): Unit = { - val acl1: Acl = new Acl(Set(new KafkaPrincipal(KafkaPrincipal.UserType, "alice"), new KafkaPrincipal(KafkaPrincipal.UserType, "bob")), PermissionType.DENY, Set[String]("host1","host2"), Set[Operation](Read, Write)) - val acl2: Acl = new Acl(Set(new KafkaPrincipal(KafkaPrincipal.UserType, "bob")), PermissionType.ALLOW, Set[String]("*"), Set[Operation](Read, Write)) - val acl3: Acl = new Acl(Set(new KafkaPrincipal(KafkaPrincipal.UserType, "bob")), PermissionType.DENY, Set[String]("host1","host2"), Set[Operation](Read)) + val acl1: Acl = new Acl(Set(new KafkaPrincipal(KafkaPrincipal.UserType, "alice"), new KafkaPrincipal(KafkaPrincipal.UserType, "bob")), Deny, Set[String]("host1","host2"), Set[Operation](Read, Write)) + val acl2: Acl = new Acl(Set(new KafkaPrincipal(KafkaPrincipal.UserType, "bob")), Allow, Set[String]("*"), Set[Operation](Read, Write)) + val acl3: Acl = new Acl(Set(new KafkaPrincipal(KafkaPrincipal.UserType, "bob")), Deny, Set[String]("host1","host2"), Set[Operation](Read)) 
val acls: Set[Acl] = Set[Acl](acl1, acl2, acl3) val jsonAcls: String = Json.encode(Acl.toJsonCompatibleMap(acls)) Assert.assertEquals(acls, Acl.fromJson(jsonAcls)) - Assert.assertEquals(acls, Acl.fromJson(aclJson)) + Assert.assertEquals(acls, Acl.fromJson(AclJson)) } def testEqualsAndHashCode(): Unit = { //check equals is not sensitive to case or order for principal, hosts or operations. - val acl1: Acl = new Acl(Set(new KafkaPrincipal(KafkaPrincipal.UserType, "bob"), new KafkaPrincipal(KafkaPrincipal.UserType, "alice")), PermissionType.ALLOW, Set[String]("host1", "host2"), Set[Operation](Read, Write)) - val acl2: Acl = new Acl(Set(new KafkaPrincipal("USER", "ALICE"), new KafkaPrincipal(KafkaPrincipal.UserType, "bob")), PermissionType.ALLOW, Set[String]("HOST2", "HOST1"), Set[Operation](Write, Read)) + val acl1: Acl = new Acl(Set(new KafkaPrincipal(KafkaPrincipal.UserType, "bob"), new KafkaPrincipal(KafkaPrincipal.UserType, "alice")), Allow, Set[String]("host1", "host2"), Set[Operation](Read, Write)) + val acl2: Acl = new Acl(Set(new KafkaPrincipal("USER", "ALICE"), new KafkaPrincipal(KafkaPrincipal.UserType, "bob")), Allow, Set[String]("HOST2", "HOST1"), Set[Operation](Write, Read)) Assert.assertEquals(acl1, acl2) Assert.assertEquals(acl1.hashCode(), acl2.hashCode()) //if user does not match returns false - val acl3: Acl = new Acl(Set(new KafkaPrincipal(KafkaPrincipal.UserType, "alice")), PermissionType.ALLOW, Set[String]("host1", "host2"), Set[Operation](Read, Write)) - val acl4: Acl = new Acl(Set(new KafkaPrincipal("USER", "Bob")), PermissionType.ALLOW, Set[String]("HOST1","HOST2"), Set[Operation](Read, Write)) + val acl3: Acl = new Acl(Set(new KafkaPrincipal(KafkaPrincipal.UserType, "alice")), Allow, Set[String]("host1", "host2"), Set[Operation](Read, Write)) + val acl4: Acl = new Acl(Set(new KafkaPrincipal("USER", "Bob")), Allow, Set[String]("HOST1","HOST2"), Set[Operation](Read, Write)) Assert.assertFalse(acl3.equals(acl4)) //if permission does not match return false - val acl5: Acl = new Acl(Set(new KafkaPrincipal(KafkaPrincipal.UserType, "alice")), PermissionType.DENY, Set[String]("host1", "host2"), Set[Operation](Read, Write)) - val acl6: Acl = new Acl(Set(new KafkaPrincipal(KafkaPrincipal.UserType, "alice")), PermissionType.ALLOW, Set[String]("HOST1","HOST2"), Set[Operation](Read, Write)) + val acl5: Acl = new Acl(Set(new KafkaPrincipal(KafkaPrincipal.UserType, "alice")), Deny, Set[String]("host1", "host2"), Set[Operation](Read, Write)) + val acl6: Acl = new Acl(Set(new KafkaPrincipal(KafkaPrincipal.UserType, "alice")), Allow, Set[String]("HOST1","HOST2"), Set[Operation](Read, Write)) Assert.assertFalse(acl5.equals(acl6)) //if hosts do not match return false - val acl7: Acl = new Acl(Set(new KafkaPrincipal(KafkaPrincipal.UserType, "alice")), PermissionType.ALLOW, Set[String]("host10", "HOST2"), Set[Operation](Read, Write)) - val acl8: Acl = new Acl(Set(new KafkaPrincipal(KafkaPrincipal.UserType, "alice")), PermissionType.ALLOW, Set[String]("HOST1","HOST2"), Set[Operation](Read, Write)) + val acl7: Acl = new Acl(Set(new KafkaPrincipal(KafkaPrincipal.UserType, "alice")), Allow, Set[String]("host10", "HOST2"), Set[Operation](Read, Write)) + val acl8: Acl = new Acl(Set(new KafkaPrincipal(KafkaPrincipal.UserType, "alice")), Allow, Set[String]("HOST1","HOST2"), Set[Operation](Read, Write)) Assert.assertFalse(acl7.equals(acl8)) //if Operations do not match return false - val acl9: Acl = new Acl(Set(new KafkaPrincipal(KafkaPrincipal.UserType, "bob")), PermissionType.ALLOW,
Set[String]("host1", "host2"), Set[Operation](Read, Write)) - val acl10: Acl = new Acl(Set(new KafkaPrincipal(KafkaPrincipal.UserType, "bob")), PermissionType.ALLOW, Set[String]("HOST1","HOST2"), Set[Operation](Read, Describe)) + val acl9: Acl = new Acl(Set(new KafkaPrincipal(KafkaPrincipal.UserType, "bob")), Allow, Set[String]("host1", "host2"), Set[Operation](Read, Write)) + val acl10: Acl = new Acl(Set(new KafkaPrincipal(KafkaPrincipal.UserType, "bob")), Allow, Set[String]("HOST1","HOST2"), Set[Operation](Read, Describe)) Assert.assertFalse(acl9.equals(acl10)) } } diff --git a/core/src/test/scala/unit/kafka/security/auth/KafkaPrincipalTest.scala b/core/src/test/scala/unit/kafka/security/auth/KafkaPrincipalTest.scala index 65e595d..59b52eb 100644 --- a/core/src/test/scala/unit/kafka/security/auth/KafkaPrincipalTest.scala +++ b/core/src/test/scala/unit/kafka/security/auth/KafkaPrincipalTest.scala @@ -43,9 +43,9 @@ class KafkaPrincipalTest extends JUnit3Suite { def testPrincipalNameCanContainSeparator: Unit = { val principalType: String = "user" - val name: String = "name:with:" + KafkaPrincipal.Seperator + ":in:it" + val name: String = "name:with:" + KafkaPrincipal.Separator + ":in:it" - val principal = KafkaPrincipal.fromString(principalType + KafkaPrincipal.Seperator + name) + val principal = KafkaPrincipal.fromString(principalType + KafkaPrincipal.Separator + name) Assert.assertEquals(principalType, principal.principalType) Assert.assertEquals(name, principal.name) } -- 2.1.3.36.g8e36a6d From 1f77ed30373488a8a3422cd7b93b0f96d80b89c6 Mon Sep 17 00:00:00 2001 From: Parth Brahmbhatt Date: Mon, 13 Jul 2015 15:06:58 -0700 Subject: [PATCH 7/8] Adding authorizer.config.path back. --- core/src/main/scala/kafka/server/KafkaConfig.scala | 9 +++++++-- .../test/scala/unit/kafka/server/KafkaConfigConfigDefTest.scala | 1 + 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/core/src/main/scala/kafka/server/KafkaConfig.scala b/core/src/main/scala/kafka/server/KafkaConfig.scala index d7fc6b0..1203672 100755 --- a/core/src/main/scala/kafka/server/KafkaConfig.scala +++ b/core/src/main/scala/kafka/server/KafkaConfig.scala @@ -48,6 +48,7 @@ object Defaults { /************* Authorizer Configuration ***********/ val AuthorizerClassName = "" val SuperUser = "" + val AuthorizerConfigPath = "" /** ********* Socket Server Configuration ***********/ val Port = 9092 @@ -166,6 +167,7 @@ object KafkaConfig { /************* Authorizer Configuration ***********/ val AuthorizerClassNameProp = "authorizer.class.name" val SuperUserProp = "super.users" + val AuthorizerConfigPathProp = "authorizer.config.path" /** ********* Socket Server Configuration ***********/ val PortProp = "port" val HostNameProp = "host.name" @@ -284,6 +286,7 @@ object KafkaConfig { /************* Authorizer Configuration ***********/ val AuthorizerClassNameDoc = "The authorizer class that should be used for authorization" val SuperUserDoc = "Comma separated list of users that will have super user access to the cluster and all the topics." + val AuthorizerConfigPathDoc = "Path to an authorizer configuration property file that will be used by the authorizer implementation." /** ********* Socket Server Configuration ***********/ val PortDoc = "the port to listen and accept connections on" val HostNameDoc = "hostname of broker. If this is set, it will only bind to this address.
If this is not set, it will bind to all interfaces" @@ -433,6 +436,7 @@ object KafkaConfig { /************* Authorizer Configuration ***********/ .define(AuthorizerClassNameProp, STRING, Defaults.AuthorizerClassName, LOW, AuthorizerClassNameDoc) .define(SuperUserProp, STRING, Defaults.SuperUser, LOW, SuperUserDoc) + .define(AuthorizerConfigPathProp, STRING, Defaults.AuthorizerConfigPath, LOW, AuthorizerConfigPathDoc) /** ********* Socket Server Configuration ***********/ .define(PortProp, INT, Defaults.Port, HIGH, PortDoc) @@ -583,8 +587,9 @@ case class KafkaConfig (props: java.util.Map[_, _]) extends AbstractConfig(Kafka val messageMaxBytes = getInt(KafkaConfig.MessageMaxBytesProp) /************* Authorizer Configuration ***********/ - val authorizerClassName: String = Defaults.AuthorizerClassName - val superUser: String = Defaults.SuperUser + val authorizerClassName: String = getString(KafkaConfig.AuthorizerClassNameProp) + val superUser: String = getString(KafkaConfig.SuperUserProp) + val authorizerConfigPath: String = getString(KafkaConfig.AuthorizerConfigPathProp) /** ********* Socket Server Configuration ***********/ val hostName = getString(KafkaConfig.HostNameProp) diff --git a/core/src/test/scala/unit/kafka/server/KafkaConfigConfigDefTest.scala b/core/src/test/scala/unit/kafka/server/KafkaConfigConfigDefTest.scala index 4062eee..ff175cb 100644 --- a/core/src/test/scala/unit/kafka/server/KafkaConfigConfigDefTest.scala +++ b/core/src/test/scala/unit/kafka/server/KafkaConfigConfigDefTest.scala @@ -266,6 +266,7 @@ class KafkaConfigConfigDefTest extends JUnit3Suite { case KafkaConfig.AuthorizerClassNameProp => // ignore string case KafkaConfig.SuperUserProp => //ignore string + case KafkaConfig.AuthorizerConfigPathProp => //ignore string case KafkaConfig.PortProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number") case KafkaConfig.HostNameProp => // ignore string -- 2.1.3.36.g8e36a6d From d971305b050712bbf038ed2f0a03db1f784172c8 Mon Sep 17 00:00:00 2001 From: Parth Brahmbhatt Date: Mon, 20 Jul 2015 16:17:45 -0700 Subject: [PATCH 8/8] Addressing more comments from Jun. 
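Among other review feedback, this patch collapses the hand-written match statements in Operation.fromString, PermissionType.fromString and ResourceType.fromString into a single lookup over values(), so an unknown name now fails with a KafkaException instead of a non-exhaustive MatchError. A minimal sketch of the shared shape, shown here for PermissionType (find is used as an equivalent of the filter(...).headOption in the diffs below):

    import kafka.common.KafkaException

    def fromString(permissionType: String): PermissionType = {
      // Case-insensitive lookup over the known case objects; fail loudly on unknown names.
      val pType = values().find(_.name.equalsIgnoreCase(permissionType))
      pType.getOrElse(throw new KafkaException(permissionType + " is not a valid permission type. The valid names are " + values().mkString(",")))
    }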
--- core/src/main/scala/kafka/security/auth/Acl.scala | 27 +++++++-------- .../scala/kafka/security/auth/Authorizer.scala | 7 ++-- .../scala/kafka/security/auth/KafkaPrincipal.scala | 8 ++--- .../main/scala/kafka/security/auth/Operation.scala | 19 ++++------- .../scala/kafka/security/auth/PermissionType.scala | 10 +++--- .../main/scala/kafka/security/auth/Resource.scala | 10 +++--- .../scala/kafka/security/auth/ResourceType.scala | 14 ++++---- core/src/main/scala/kafka/server/KafkaApis.scala | 34 +++++++++---------- core/src/main/scala/kafka/server/KafkaConfig.scala | 10 ------ core/src/main/scala/kafka/server/KafkaServer.scala | 2 +- .../unit/kafka/security/auth/OperationTest.scala | 38 ++++++++++++++++++++++ .../kafka/security/auth/PermissionTypeTest.scala | 38 ++++++++++++++++++++++ .../kafka/security/auth/ResourceTypeTest.scala | 38 ++++++++++++++++++++++ .../kafka/server/KafkaConfigConfigDefTest.scala | 2 -- 14 files changed, 174 insertions(+), 83 deletions(-) create mode 100644 core/src/test/scala/unit/kafka/security/auth/OperationTest.scala create mode 100644 core/src/test/scala/unit/kafka/security/auth/PermissionTypeTest.scala create mode 100644 core/src/test/scala/unit/kafka/security/auth/ResourceTypeTest.scala diff --git a/core/src/main/scala/kafka/security/auth/Acl.scala b/core/src/main/scala/kafka/security/auth/Acl.scala index 3ef9522..bda6421 100644 --- a/core/src/main/scala/kafka/security/auth/Acl.scala +++ b/core/src/main/scala/kafka/security/auth/Acl.scala @@ -20,7 +20,7 @@ package kafka.security.auth import kafka.utils.Json object Acl { - val WildCardPrincipal: KafkaPrincipal = new KafkaPrincipal("user", "*") + val WildCardPrincipal: KafkaPrincipal = new KafkaPrincipal(KafkaPrincipal.UserType, "*") val WildCardHost: String = "*" val AllowAllAcl = new Acl(Set[KafkaPrincipal](WildCardPrincipal), Allow, Set[String](WildCardHost), Set[Operation](All)) val PrincipalKey = "principals" @@ -58,7 +58,7 @@ object Acl { * @return */ def fromJson(aclJson: String): Set[Acl] = { - if(aclJson == null || aclJson.isEmpty) { + if (aclJson == null || aclJson.isEmpty) { return collection.immutable.Set.empty[Acl] } var acls: collection.mutable.HashSet[Acl] = new collection.mutable.HashSet[Acl]() @@ -67,7 +67,7 @@ object Acl { val aclMap = m.asInstanceOf[Map[String, Any]] //the acl json version. require(aclMap(VersionKey) == CurrentVersion) - val aclSet: List[Map[String, Any]] = aclMap.get(AclsKey).get.asInstanceOf[List[Map[String, Any]]] + val aclSet: List[Map[String, Any]] = aclMap(AclsKey).asInstanceOf[List[Map[String, Any]]] aclSet.foreach(item => { val principals: List[KafkaPrincipal] = item(PrincipalKey).asInstanceOf[List[String]].map(principal => KafkaPrincipal.fromString(principal)) val permissionType: PermissionType = PermissionType.fromString(item(PermissionTypeKey).asInstanceOf[String]) @@ -80,7 +80,7 @@ object Acl { return acls.toSet } - def toJsonCompatibleMap(acls: Set[Acl]): Map[String,Any] = { + def toJsonCompatibleMap(acls: Set[Acl]): Map[String, Any] = { acls match { case aclSet: Set[Acl] => Map(Acl.VersionKey -> Acl.CurrentVersion, Acl.AclsKey -> aclSet.map(acl => acl.toMap).toList) case _ => null @@ -98,7 +98,7 @@ object Acl { * @param hosts A value of * indicates all hosts. * @param operations A value of ALL indicates all operations. 
*/ -class Acl(val principals: Set[KafkaPrincipal],val permissionType: PermissionType,val hosts: Set[String],val operations: Set[Operation]) { +class Acl(val principals: Set[KafkaPrincipal], val permissionType: PermissionType, val hosts: Set[String], val operations: Set[Operation]) { /** * TODO: Ideally we would have a symmetric toJson method but our current json library fails to decode double parsed json strings so * Convert an acl instance to a map * @return Map representation of the Acl. */ - def toMap() : Map[String, Any] = { - val map: collection.mutable.HashMap[String, Any] = new collection.mutable.HashMap[String, Any]() - map.put(Acl.PrincipalKey, principals.map(principal => principal.toString)) - map.put(Acl.PermissionTypeKey, permissionType.name) - map.put(Acl.OperationKey, operations.map(operation => operation.name)) - map.put(Acl.HostsKey, hosts) - - map.toMap + def toMap(): Map[String, Any] = { + Map(Acl.PrincipalKey -> principals.map(principal => principal.toString), + Acl.PermissionTypeKey -> permissionType.name, + Acl.OperationKey -> operations.map(operation => operation.name), + Acl.HostsKey -> hosts) } override def equals(that: Any): Boolean = { - if(!(that.isInstanceOf[Acl])) + if (!that.isInstanceOf[Acl]) return false val other = that.asInstanceOf[Acl] - if(permissionType.equals(other.permissionType) && operations.equals(other.operations) && principals.equals(other.principals) + if (permissionType.equals(other.permissionType) && operations.equals(other.operations) && principals.equals(other.principals) && hosts.map(host => host.toLowerCase()).equals(other.hosts.map(host=> host.toLowerCase()))) { return true } diff --git a/core/src/main/scala/kafka/security/auth/Authorizer.scala b/core/src/main/scala/kafka/security/auth/Authorizer.scala index 72ab803..42c06ba 100644 --- a/core/src/main/scala/kafka/security/auth/Authorizer.scala +++ b/core/src/main/scala/kafka/security/auth/Authorizer.scala @@ -19,6 +19,7 @@ package kafka.security.auth import kafka.network.RequestChannel.Session import kafka.server.KafkaConfig +import org.apache.kafka.common.Configurable /** * Top level interface that all pluggable authorizers must implement. Kafka server will read "authorizer.class" config @@ -29,11 +30,7 @@ import kafka.server.KafkaConfig * From that point onwards, every client request will first be routed to authorize method and the request will only be * authorized if the method returns true. */ -trait Authorizer { - /** - * Guaranteed to be called before any authorize call is made. - */ - def initialize(kafkaConfig: KafkaConfig): Unit +trait Authorizer extends Configurable { /** * @param session The session being authenticated. diff --git a/core/src/main/scala/kafka/security/auth/KafkaPrincipal.scala b/core/src/main/scala/kafka/security/auth/KafkaPrincipal.scala index bcdbf0e..95e6ff9 100644 --- a/core/src/main/scala/kafka/security/auth/KafkaPrincipal.scala +++ b/core/src/main/scala/kafka/security/auth/KafkaPrincipal.scala @@ -38,9 +38,9 @@ * @param principalType type of principal user, unixgroup, ldapgroup.
* @param name name of the principal */ -class KafkaPrincipal(val principalType: String,val name: String) extends Principal { +class KafkaPrincipal(val principalType: String, val name: String) extends Principal { - if(principalType == null || name == null) + if (principalType == null || name == null) throw new IllegalArgumentException("principalType and name can not be null") override def getName: String = { @@ -52,10 +52,10 @@ class KafkaPrincipal(val principalType: String,val name: String) extends Princip } override def equals(that: Any): Boolean = { - if(!(that.isInstanceOf[KafkaPrincipal])) + if (!(that.isInstanceOf[KafkaPrincipal])) return false val other: KafkaPrincipal = that.asInstanceOf[KafkaPrincipal] - if(principalType.equalsIgnoreCase(other.principalType) && name.equalsIgnoreCase(other.name)) + if (principalType.equalsIgnoreCase(other.principalType) && name.equalsIgnoreCase(other.name)) return true false } diff --git a/core/src/main/scala/kafka/security/auth/Operation.scala b/core/src/main/scala/kafka/security/auth/Operation.scala index 1cfdc01..1a3693d 100644 --- a/core/src/main/scala/kafka/security/auth/Operation.scala +++ b/core/src/main/scala/kafka/security/auth/Operation.scala @@ -14,7 +14,10 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package kafka.security.auth; +package kafka.security.auth + +import kafka.common.KafkaException +; /** * Different operations a client may perform on kafka resources. @@ -31,17 +34,9 @@ case object ClusterAction extends Operation { val name: String = "ClusterAction" case object All extends Operation { val name: String = "All" } object Operation { - def fromString(operation: String) : Operation = { - operation match { - case operation if operation.equalsIgnoreCase(Read.name) => Read - case operation if operation.equalsIgnoreCase(Write.name) => Write - case operation if operation.equalsIgnoreCase(Create.name) => Create - case operation if operation.equalsIgnoreCase(Delete.name) => Delete - case operation if operation.equalsIgnoreCase(Alter.name) => Alter - case operation if operation.equalsIgnoreCase(Describe.name) => Describe - case operation if operation.equalsIgnoreCase(ClusterAction.name) => ClusterAction - case operation if operation.equalsIgnoreCase(All.name) => All - } + def fromString(operation: String): Operation = { + val op = values().filter(op => op.name.equalsIgnoreCase(operation)).headOption + op.getOrElse(throw new KafkaException(operation + " not a valid operation name. 
The valid names are " + values().mkString(","))) } def values() : List[Operation] = { diff --git a/core/src/main/scala/kafka/security/auth/PermissionType.scala b/core/src/main/scala/kafka/security/auth/PermissionType.scala index e3a2e97..8ed05a2 100644 --- a/core/src/main/scala/kafka/security/auth/PermissionType.scala +++ b/core/src/main/scala/kafka/security/auth/PermissionType.scala @@ -16,6 +16,8 @@ */ package kafka.security.auth +import kafka.common.KafkaException + ; /** @@ -36,11 +38,9 @@ case object Deny extends PermissionType { } object PermissionType { - def fromString(permissionType: String) : PermissionType = { - return permissionType match { - case permissionType if permissionType.equalsIgnoreCase(Allow.name) => Allow - case permissionType if permissionType.equalsIgnoreCase(Deny.name) => Deny - } + def fromString(permissionType: String): PermissionType = { + val pType = values().filter(pType => pType.name.equalsIgnoreCase(permissionType)).headOption + pType.getOrElse(throw new KafkaException(permissionType + " not a valid permissionType name. The valid names are " + values().mkString(","))) } def values() : List[PermissionType] = { diff --git a/core/src/main/scala/kafka/security/auth/Resource.scala b/core/src/main/scala/kafka/security/auth/Resource.scala index d945b36..b4dd59a 100644 --- a/core/src/main/scala/kafka/security/auth/Resource.scala +++ b/core/src/main/scala/kafka/security/auth/Resource.scala @@ -22,9 +22,9 @@ object Resource { val ClusterResource: Resource = new Resource(Cluster,Resource.ClusterResourceName) def fromString(str: String) : Resource = { - val arr: Array[String] = str.split(Separator) + val arr: Array[String] = str.split(Separator, 2) - if(arr.length != 2) { + if (arr.length != 2) { throw new IllegalArgumentException("Expected a string in format ResourceType:Name but got " + str + ". Allowed resource types are " + ResourceType.values()) } @@ -38,17 +38,17 @@ * @param name name of the resource, for topic this will be topic name, for group it will be group name. For cluster type * it will be a constant string kafka-cluster.
*/ -class Resource(val resourceType: ResourceType,val name: String) { +class Resource(val resourceType: ResourceType, val name: String) { override def toString: String = { resourceType.name + Resource.Separator + name } override def equals(that: Any): Boolean = { - if(!(that.isInstanceOf[Resource])) + if (!(that.isInstanceOf[Resource])) return false val other: Resource = that.asInstanceOf[Resource] - if(resourceType.equals(other.resourceType) && name.equalsIgnoreCase(other.name)) + if (resourceType.equals(other.resourceType) && name.equalsIgnoreCase(other.name)) return true false } diff --git a/core/src/main/scala/kafka/security/auth/ResourceType.scala b/core/src/main/scala/kafka/security/auth/ResourceType.scala index db19d16..66ffb0d 100644 --- a/core/src/main/scala/kafka/security/auth/ResourceType.scala +++ b/core/src/main/scala/kafka/security/auth/ResourceType.scala @@ -16,6 +16,8 @@ */ package kafka.security.auth +import kafka.common.KafkaException + ; /** @@ -41,15 +43,13 @@ case object ConsumerGroup extends ResourceType { object ResourceType { - def fromString(resourceType: String) : ResourceType = { - return resourceType match { - case resourceType if resourceType.equalsIgnoreCase(Cluster.name) => Cluster - case resourceType if resourceType.equalsIgnoreCase(Topic.name) => Topic - case resourceType if resourceType.equalsIgnoreCase(ConsumerGroup.name) => ConsumerGroup - } + + def fromString(resourceType: String): ResourceType = { + val rType = values().filter(rType => rType.name.equalsIgnoreCase(resourceType)).headOption + rType.getOrElse(throw new KafkaException(resourceType + " not a valid resourceType name. The valid names are " + values().mkString(","))) } def values() : List[ResourceType] = { - return List(Cluster, Topic, ConsumerGroup) + List(Cluster, Topic, ConsumerGroup) } } \ No newline at end of file diff --git a/core/src/main/scala/kafka/server/KafkaApis.scala b/core/src/main/scala/kafka/server/KafkaApis.scala index 7a5a8a3..20ced64 100644 --- a/core/src/main/scala/kafka/server/KafkaApis.scala +++ b/core/src/main/scala/kafka/server/KafkaApis.scala @@ -98,9 +98,8 @@ class KafkaApis(val requestChannel: RequestChannel, // stop serving data to clients for the topic being deleted val leaderAndIsrRequest = request.requestObj.asInstanceOf[LeaderAndIsrRequest] - if(authorizer.isDefined && !authorizer.get.authorize(request.session, ClusterAction , Resource.ClusterResource)) { - throw new AuthorizationException("Request " + request + " is not authorized.") - } + if(authorizer.isDefined && !authorizer.get.authorize(request.session, ClusterAction , Resource.ClusterResource)) + throw new AuthorizationException("Request " + leaderAndIsrRequest + " is not authorized.") try { // call replica manager to handle updating partitions to become leader or follower @@ -134,9 +133,8 @@ class KafkaApis(val requestChannel: RequestChannel, // stop serving data to clients for the topic being deleted val stopReplicaRequest = request.requestObj.asInstanceOf[StopReplicaRequest] - if(authorizer.isDefined && !authorizer.get.authorize(request.session, ClusterAction, Resource.ClusterResource)) { - throw new AuthorizationException("Request " + request + " is not authorized.") - } + if(authorizer.isDefined && !authorizer.get.authorize(request.session, ClusterAction, Resource.ClusterResource)) + throw new AuthorizationException("Request " + stopReplicaRequest + " is not authorized.") val (response, error) = replicaManager.stopReplicas(stopReplicaRequest) val stopReplicaResponse = new 
StopReplicaResponse(stopReplicaRequest.correlationId, response.toMap, error) @@ -147,9 +145,8 @@ class KafkaApis(val requestChannel: RequestChannel, def handleUpdateMetadataRequest(request: RequestChannel.Request) { val updateMetadataRequest = request.requestObj.asInstanceOf[UpdateMetadataRequest] - if(authorizer.isDefined && !authorizer.get.authorize(request.session, ClusterAction, Resource.ClusterResource)) { - throw new AuthorizationException("Request " + request + " is not authorized.") - } + if(authorizer.isDefined && !authorizer.get.authorize(request.session, ClusterAction, Resource.ClusterResource)) + throw new AuthorizationException("Request " + updateMetadataRequest + " is not authorized.") replicaManager.maybeUpdateMetadataCache(updateMetadataRequest, metadataCache) @@ -163,9 +160,8 @@ class KafkaApis(val requestChannel: RequestChannel, // stop serving data to clients for the topic being deleted val controlledShutdownRequest = request.requestObj.asInstanceOf[ControlledShutdownRequest] - if(authorizer.isDefined && !authorizer.get.authorize(request.session, ClusterAction, Resource.ClusterResource)) { - throw new AuthorizationException("Request " + request + " is not authorized.") - } + if(authorizer.isDefined && !authorizer.get.authorize(request.session, ClusterAction, Resource.ClusterResource)) + throw new AuthorizationException("Request " + controlledShutdownRequest + " is not authorized.") val partitionsRemaining = controller.shutdownBroker(controlledShutdownRequest.brokerId) val controlledShutdownResponse = new ControlledShutdownResponse(controlledShutdownRequest.correlationId, @@ -621,7 +617,7 @@ class KafkaApis(val requestChannel: RequestChannel, } val topicResponses = metadataCache.getTopicMetadata(Set(ConsumerCoordinator.OffsetsTopicName), request.securityProtocol) - if(topicResponses.isEmpty) { + if (topicResponses.isEmpty) { if (authorizer.isDefined && !authorizer.get.authorize(request.session, Create, Resource.ClusterResource)) { throw new AuthorizationException("Request " + consumerMetadataRequest + " is not authorized to create " + ConsumerCoordinator.OffsetsTopicName) } @@ -654,13 +650,17 @@ class KafkaApis(val requestChannel: RequestChannel, topic => (!authorizer.isDefined || authorizer.get.authorize(request.session, Read, new Resource(Topic, topic)) && authorizer.get.authorize(request.session, Read, new Resource(ConsumerGroup, joinGroupRequest.groupId())))) - val unauthorizedTopicPartition = unauthorizedTopics.map(topic => new TopicPartition(topic, -1)) - // the callback for sending a join-group response def sendResponseCallback(partitions: Set[TopicAndPartition], consumerId: String, generationId: Int, errorCode: Short) { - val partitionList = (partitions.map(tp => new TopicPartition(tp.topic, tp.partition)) ++ unauthorizedTopicPartition).toBuffer - val error = if (errorCode == ErrorMapping.NoError && unauthorizedTopicPartition.nonEmpty) ErrorMapping.AuthorizationCode else errorCode + val error = if (errorCode == ErrorMapping.NoError && unauthorizedTopics.nonEmpty) ErrorMapping.AuthorizationCode else errorCode + + val partitionList = if (error == ErrorMapping.NoError) + partitions.map(tp => new TopicPartition(tp.topic, tp.partition)).toBuffer + else + List.empty.toBuffer + val responseBody = new JoinGroupResponse(error, generationId, consumerId, partitionList) + trace("Sending join group response %s for correlation id %d to client %s." 
.format(responseBody, request.header.correlationId, request.header.clientId)) requestChannel.sendResponse(new RequestChannel.Response(request, new ResponseSend(request.connectionId, respHeader, responseBody))) diff --git a/core/src/main/scala/kafka/server/KafkaConfig.scala b/core/src/main/scala/kafka/server/KafkaConfig.scala index 1203672..edfb560 100755 --- a/core/src/main/scala/kafka/server/KafkaConfig.scala +++ b/core/src/main/scala/kafka/server/KafkaConfig.scala @@ -47,8 +47,6 @@ object Defaults { /************* Authorizer Configuration ***********/ val AuthorizerClassName = "" - val SuperUser = "" - val AuthorizerConfigPath = "" /** ********* Socket Server Configuration ***********/ val Port = 9092 @@ -166,8 +164,6 @@ object KafkaConfig { val QueuedMaxRequestsProp = "queued.max.requests" /************* Authorizer Configuration ***********/ val AuthorizerClassNameProp = "authorizer.class.name" - val SuperUserProp = "super.users" - val AuthorizerConfigPathProp = "authorizer.config.path" /** ********* Socket Server Configuration ***********/ val PortProp = "port" val HostNameProp = "host.name" @@ -285,8 +281,6 @@ object KafkaConfig { val QueuedMaxRequestsDoc = "The number of queued requests allowed before blocking the network threads" /************* Authorizer Configuration ***********/ val AuthorizerClassNameDoc = "The authorizer class that should be used for authorization" - val SuperUserDoc = "Comma separated list of users that will have super user access to the cluster and all the topics." - val AuthorizerConfigPathDoc = "Path to an authorizer configuration property file that will be used by the authorizer implementation." /** ********* Socket Server Configuration ***********/ val PortDoc = "the port to listen and accept connections on" val HostNameDoc = "hostname of broker. If this is set, it will only bind to this address.
If this is not set, it will bind to all interfaces" @@ -435,8 +429,6 @@ object KafkaConfig { /************* Authorizer Configuration ***********/ .define(AuthorizerClassNameProp, STRING, Defaults.AuthorizerClassName, LOW, AuthorizerClassNameDoc) - .define(SuperUserProp, STRING, Defaults.SuperUser, LOW, SuperUserDoc) - .define(AuthorizerConfigPathProp, STRING, Defaults.AuthorizerConfigPath, LOW, AuthorizerConfigPathDoc) /** ********* Socket Server Configuration ***********/ .define(PortProp, INT, Defaults.Port, HIGH, PortDoc) @@ -588,8 +580,6 @@ case class KafkaConfig (props: java.util.Map[_, _]) extends AbstractConfig(Kafka /************* Authorizer Configuration ***********/ val authorizerClassName: String = getString(KafkaConfig.AuthorizerClassNameProp) - val superUser: String = getString(KafkaConfig.SuperUserProp) - val authorizerConfigPath: String = getString(KafkaConfig.AuthorizerConfigPathProp) /** ********* Socket Server Configuration ***********/ val hostName = getString(KafkaConfig.HostNameProp) diff --git a/core/src/main/scala/kafka/server/KafkaServer.scala b/core/src/main/scala/kafka/server/KafkaServer.scala index d74d2e2..0a5fbf7 100755 --- a/core/src/main/scala/kafka/server/KafkaServer.scala +++ b/core/src/main/scala/kafka/server/KafkaServer.scala @@ -163,7 +163,7 @@ class KafkaServer(val config: KafkaConfig, time: Time = SystemTime) extends Logg /* Get the authorizer and initialize it if one is specified.*/ val authorizer: Option[Authorizer] = if(config.authorizerClassName != null && !config.authorizerClassName.isEmpty) { val authZ: Authorizer = CoreUtils.createObject(config.authorizerClassName) - authZ.initialize(config) + authZ.configure(config.originals()) Option(authZ) } else { None diff --git a/core/src/test/scala/unit/kafka/security/auth/OperationTest.scala b/core/src/test/scala/unit/kafka/security/auth/OperationTest.scala new file mode 100644 index 0000000..a45d434 --- /dev/null +++ b/core/src/test/scala/unit/kafka/security/auth/OperationTest.scala @@ -0,0 +1,38 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package unit.kafka.security.auth + +import kafka.common.{KafkaException} +import kafka.security.auth.{Operation, Read} +import org.junit.Assert +import org.scalatest.junit.JUnit3Suite + +class OperationTest extends JUnit3Suite { + + def testFromString(): Unit = { + val op: Operation = Operation.fromString("READ") + Assert.assertEquals(Read, op) + + try { + Operation.fromString("badName") + fail("Expected exception on invalid operation name.") + } catch { + case e: KafkaException => "Expected." 
+ } + } + +} diff --git a/core/src/test/scala/unit/kafka/security/auth/PermissionTypeTest.scala b/core/src/test/scala/unit/kafka/security/auth/PermissionTypeTest.scala new file mode 100644 index 0000000..47b8923 --- /dev/null +++ b/core/src/test/scala/unit/kafka/security/auth/PermissionTypeTest.scala @@ -0,0 +1,38 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package unit.kafka.security.auth + +import kafka.common.KafkaException +import kafka.security.auth.{Allow, PermissionType} +import org.junit.Assert +import org.scalatest.junit.JUnit3Suite + +class PermissionTypeTest extends JUnit3Suite { + + def testFromString(): Unit = { + val permissionType: PermissionType = PermissionType.fromString("Allow") + Assert.assertEquals(Allow, permissionType) + + try { + PermissionType.fromString("badName") + fail("Expected exception on invalid PermissionType name.") + } catch { + case e: KafkaException => "Expected." + } + } + +} diff --git a/core/src/test/scala/unit/kafka/security/auth/ResourceTypeTest.scala b/core/src/test/scala/unit/kafka/security/auth/ResourceTypeTest.scala new file mode 100644 index 0000000..4c97732 --- /dev/null +++ b/core/src/test/scala/unit/kafka/security/auth/ResourceTypeTest.scala @@ -0,0 +1,38 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package unit.kafka.security.auth + +import kafka.common.KafkaException +import kafka.security.auth.{ResourceType, Topic} +import org.junit.Assert +import org.scalatest.junit.JUnit3Suite + +class ResourceTypeTest extends JUnit3Suite { + + def testFromString(): Unit = { + val resourceType: ResourceType = ResourceType.fromString("Topic") + Assert.assertEquals(Topic, resourceType) + + try { + ResourceType.fromString("badName") + fail("Expected exception on invalid ResourceType name.") + } catch { + case e: KafkaException => "Expected." 
+ } + } + +} diff --git a/core/src/test/scala/unit/kafka/server/KafkaConfigConfigDefTest.scala b/core/src/test/scala/unit/kafka/server/KafkaConfigConfigDefTest.scala index ff175cb..1e1dc75 100644 --- a/core/src/test/scala/unit/kafka/server/KafkaConfigConfigDefTest.scala +++ b/core/src/test/scala/unit/kafka/server/KafkaConfigConfigDefTest.scala @@ -265,8 +265,6 @@ class KafkaConfigConfigDefTest extends JUnit3Suite { case KafkaConfig.QueuedMaxRequestsProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number", "0") case KafkaConfig.AuthorizerClassNameProp => // ignore string - case KafkaConfig.SuperUserProp => //ignore string - case KafkaConfig.AuthorizerConfigPathProp => //ignore string case KafkaConfig.PortProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number") case KafkaConfig.HostNameProp => // ignore string -- 2.1.3.36.g8e36a6d
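With the series applied, Authorizer is a Configurable trait: KafkaServer reflectively instantiates the class named by authorizer.class.name via CoreUtils.createObject and hands it config.originals(), and KafkaApis then consults authorize(session, operation, resource) on every request. A minimal sketch of a custom implementation follows, under the assumption that the trait's authorize signature matches the KafkaApis call sites shown above; the package and class name here are hypothetical, not part of the patch series:

    package example.auth

    import java.util

    import kafka.network.RequestChannel.Session
    import kafka.security.auth.{Authorizer, Operation, Resource}

    // Hypothetical plug-in that allows every request, mirroring the semantics
    // of Acl.AllowAllAcl; a real implementation would evaluate stored Acls.
    class AllowEverythingAuthorizer extends Authorizer {

      // KafkaServer calls configure(config.originals()) right after
      // instantiating the configured class, before any authorize call.
      override def configure(configs: util.Map[String, _]): Unit = ()

      // KafkaApis consults this per request, e.g.
      //   authorizer.get.authorize(request.session, Read, new Resource(Topic, topic))
      // Returning false surfaces as ErrorMapping.AuthorizationCode or an
      // AuthorizationException, depending on the request type.
      override def authorize(session: Session, operation: Operation, resource: Resource): Boolean = true
    }

Wiring such a class in is then pure broker configuration, e.g. authorizer.class.name=example.auth.AllowEverythingAuthorizer.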