From ba67bba7d846b9a43b280d2201e7dfb7ca1031bf Mon Sep 17 00:00:00 2001
From: Gwen Shapira
Date: Thu, 4 Dec 2014 21:50:21 -0800
Subject: [PATCH 1/9] first commit of refactoring.

---
 .../org/apache/kafka/clients/NetworkClient.java    |   9 +-
 .../kafka/clients/producer/KafkaProducer.java      |   3 +-
 .../kafka/clients/producer/ProducerConfig.java     |  13 ++-
 .../org/apache/kafka/common/protocol/Protocol.java |  12 ++-
 .../kafka/common/requests/MetadataRequest.java     |  26 ++++-
 .../apache/kafka/clients/NetworkClientTest.java    |   2 +-
 .../kafka/common/requests/RequestResponseTest.java |   2 +-
 core/src/main/scala/kafka/admin/AdminUtils.scala   |   9 +-
 core/src/main/scala/kafka/admin/TopicCommand.scala |   4 +-
 .../scala/kafka/api/ConsumerMetadataRequest.scala  |  16 +++-
 .../scala/kafka/api/ConsumerMetadataResponse.scala |   8 +-
 core/src/main/scala/kafka/api/TopicMetadata.scala  |  26 +++--
 .../scala/kafka/api/TopicMetadataRequest.scala     |  29 +++++-
 .../scala/kafka/api/TopicMetadataResponse.scala    |  14 ++-
 core/src/main/scala/kafka/client/ClientUtils.scala |  26 ++---
 core/src/main/scala/kafka/cluster/Broker.scala     | 106 +++++++++++++++++----
 .../main/scala/kafka/cluster/BrokerEndPoint.scala  |  59 ++++++++++++
 core/src/main/scala/kafka/cluster/EndPoint.scala   |  58 +++++++++++
 .../main/scala/kafka/cluster/ProtocolType.scala    |  26 +++++
 .../BrokerEndPointNotAvailableException.scala      |  22 +++++
 .../main/scala/kafka/consumer/ConsumerConfig.scala |   5 +
 .../kafka/consumer/ConsumerFetcherManager.scala    |   6 +-
 .../kafka/consumer/ConsumerFetcherThread.scala     |   4 +-
 .../consumer/ZookeeperConsumerConnector.scala      |   3 +-
 .../controller/ControllerChannelManager.scala      |   3 +-
 .../scala/kafka/controller/KafkaController.scala   |   6 +-
 .../kafka/javaapi/ConsumerMetadataResponse.scala   |   4 +-
 .../main/scala/kafka/javaapi/TopicMetadata.scala   |   2 +-
 .../scala/kafka/javaapi/TopicMetadataRequest.scala |  11 ++-
 .../main/scala/kafka/network/BlockingChannel.scala |   5 +-
 .../main/scala/kafka/network/SocketServer.scala    |  30 ++++--
 .../main/scala/kafka/producer/ProducerConfig.scala |   4 +
 .../main/scala/kafka/producer/ProducerPool.scala   |  12 ++-
 .../kafka/server/AbstractFetcherManager.scala      |   2 +-
 .../scala/kafka/server/AbstractFetcherThread.scala |   4 +-
 core/src/main/scala/kafka/server/KafkaApis.scala   |   6 +-
 core/src/main/scala/kafka/server/KafkaConfig.scala |  28 +++---
 .../main/scala/kafka/server/KafkaHealthcheck.scala |  13 +--
 core/src/main/scala/kafka/server/KafkaServer.scala |  11 ++-
 .../scala/kafka/server/ReplicaFetcherThread.scala  |   2 +-
 .../scala/kafka/tools/ConsumerOffsetChecker.scala  |   3 +-
 .../main/scala/kafka/tools/GetOffsetShell.scala    |   4 +-
 .../kafka/tools/ReplicaVerificationTool.scala      |   6 +-
 .../scala/kafka/tools/SimpleConsumerShell.scala    |  14 ++-
 .../main/scala/kafka/tools/UpdateOffsetsInZK.scala |   5 +-
 core/src/main/scala/kafka/utils/Utils.scala        |   8 ++
 core/src/main/scala/kafka/utils/ZkUtils.scala      |  17 +++-
 .../kafka/api/ProducerFailureHandlingTest.scala    |  12 ++-
 .../integration/kafka/api/ProducerSendTest.scala   |   8 +-
 .../test/scala/other/kafka/TestOffsetManager.scala |   5 +-
 .../scala/unit/kafka/admin/AddPartitionsTest.scala |  10 +-
 .../api/RequestResponseSerializationTest.scala     |  12 ++-
 .../test/scala/unit/kafka/cluster/BrokerTest.scala |  94 ++++++++++++++++++
 .../unit/kafka/consumer/ConsumerIteratorTest.scala |   4 +-
 .../scala/unit/kafka/integration/FetcherTest.scala |   4 +-
 .../unit/kafka/integration/TopicMetadataTest.scala |  14 +--
 .../unit/kafka/network/SocketServerTest.scala      |   6 +-
 .../unit/kafka/producer/SyncProducerTest.scala     |  25 +++--
 .../unit/kafka/server/AdvertiseBrokerTest.scala    |  11 ++-
 .../scala/unit/kafka/server/KafkaConfigTest.scala  |  21 ++--
 .../unit/kafka/server/LeaderElectionTest.scala     |   5 +-
 .../scala/unit/kafka/server/LogOffsetTest.scala    |   2 +-
 .../test/scala/unit/kafka/utils/TestUtils.scala    |  13 ++-
 63 files changed, 721 insertions(+), 213 deletions(-)
 create mode 100644 core/src/main/scala/kafka/cluster/BrokerEndPoint.scala
 create mode 100644 core/src/main/scala/kafka/cluster/EndPoint.scala
 create mode 100644 core/src/main/scala/kafka/cluster/ProtocolType.scala
 create mode 100644 core/src/main/scala/kafka/common/BrokerEndPointNotAvailableException.scala
 create mode 100644 core/src/test/scala/unit/kafka/cluster/BrokerTest.scala

diff --git a/clients/src/main/java/org/apache/kafka/clients/NetworkClient.java b/clients/src/main/java/org/apache/kafka/clients/NetworkClient.java
index 525b95e..13387ba 100644
--- a/clients/src/main/java/org/apache/kafka/clients/NetworkClient.java
+++ b/clients/src/main/java/org/apache/kafka/clients/NetworkClient.java
@@ -80,13 +80,17 @@ public class NetworkClient implements KafkaClient {
     /* the last timestamp when no broker node is available to connect */
     private long lastNoNodeAvailableMs;
 
+    /* protocol used for communication to brokers */
+    private String securityProtocol;
+
     public NetworkClient(Selectable selector,
                          Metadata metadata,
                          String clientId,
                          int maxInFlightRequestsPerConnection,
                          long reconnectBackoffMs,
                          int socketSendBuffer,
-                         int socketReceiveBuffer) {
+                         int socketReceiveBuffer,
+                         String securityProtocol) {
         this.selector = selector;
         this.metadata = metadata;
         this.clientId = clientId;
@@ -98,6 +102,7 @@ public class NetworkClient implements KafkaClient {
         this.nodeIndexOffset = new Random().nextInt(Integer.MAX_VALUE);
         this.metadataFetchInProgress = false;
         this.lastNoNodeAvailableMs = 0;
+        this.securityProtocol = securityProtocol;
     }
 
     /**
@@ -362,7 +367,7 @@ public class NetworkClient implements KafkaClient {
      * Create a metadata request for the given topics
      */
     private ClientRequest metadataRequest(long now, int node, Set<String> topics) {
-        MetadataRequest metadata = new MetadataRequest(new ArrayList<String>(topics));
+        MetadataRequest metadata = new MetadataRequest(new ArrayList<String>(topics), securityProtocol);
         RequestSend send = new RequestSend(node, nextRequestHeader(ApiKeys.METADATA), metadata.toStruct());
         return new ClientRequest(now, true, send, null);
     }
diff --git a/clients/src/main/java/org/apache/kafka/clients/producer/KafkaProducer.java b/clients/src/main/java/org/apache/kafka/clients/producer/KafkaProducer.java
index f61efb3..96487ae 100644
--- a/clients/src/main/java/org/apache/kafka/clients/producer/KafkaProducer.java
+++ b/clients/src/main/java/org/apache/kafka/clients/producer/KafkaProducer.java
@@ -162,7 +162,8 @@ public class KafkaProducer implements Producer {
                 config.getInt(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION),
                 config.getLong(ProducerConfig.RECONNECT_BACKOFF_MS_CONFIG),
                 config.getInt(ProducerConfig.SEND_BUFFER_CONFIG),
-                config.getInt(ProducerConfig.RECEIVE_BUFFER_CONFIG));
+                config.getInt(ProducerConfig.RECEIVE_BUFFER_CONFIG),
+                config.getString(ProducerConfig.SECURITY_PROTOCOL));
         this.sender = new Sender(client,
                                  this.metadata,
                                  this.accumulator,
diff --git a/clients/src/main/java/org/apache/kafka/clients/producer/ProducerConfig.java b/clients/src/main/java/org/apache/kafka/clients/producer/ProducerConfig.java
index a893d88..03d2fee 100644
--- a/clients/src/main/java/org/apache/kafka/clients/producer/ProducerConfig.java
+++ b/clients/src/main/java/org/apache/kafka/clients/producer/ProducerConfig.java
@@ -173,7 +173,6 @@ public class ProducerConfig extends AbstractConfig {
     private static final String MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION_DOC = "The maximum number of unacknowledged requests the client will send on a single connection before blocking."
                                                                             + " Note that if this setting is set to be greater than 1 and there are failed sends, there is a risk of"
                                                                             + " message re-ordering due to retries (i.e., if retries are enabled).";
-
     /** key.serializer */
     public static final String KEY_SERIALIZER_CLASS_CONFIG = "key.serializer";
     private static final String KEY_SERIALIZER_CLASS_DOC = "Serializer class for key that implements the Serializer interface.";
@@ -182,6 +181,10 @@ public class ProducerConfig extends AbstractConfig {
     public static final String VALUE_SERIALIZER_CLASS_CONFIG = "value.serializer";
     private static final String VALUE_SERIALIZER_CLASS_DOC = "Serializer class for value that implements the Serializer interface.";
 
+    /** security.protocol */
+    public static final String SECURITY_PROTOCOL = "security.protocol";
+    private static final String SECURITY_PROTOCOL_DOC = "Protocol used to communicate with brokers. Currently only PLAINTEXT is supported; SSL and Kerberos are planned for the near future.";
+
     static {
         config = new ConfigDef().define(BOOTSTRAP_SERVERS_CONFIG, Type.LIST, Importance.HIGH, BOOSTRAP_SERVERS_DOC)
                                 .define(BUFFER_MEMORY_CONFIG, Type.LONG, 32 * 1024 * 1024L, atLeast(0L), Importance.HIGH, BUFFER_MEMORY_DOC)
@@ -230,7 +233,13 @@ public class ProducerConfig extends AbstractConfig {
                                         Importance.LOW,
                                         MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION_DOC)
                                 .define(KEY_SERIALIZER_CLASS_CONFIG, Type.CLASS, "org.apache.kafka.clients.producer.ByteArraySerializer", Importance.HIGH, KEY_SERIALIZER_CLASS_DOC)
-                                .define(VALUE_SERIALIZER_CLASS_CONFIG, Type.CLASS, "org.apache.kafka.clients.producer.ByteArraySerializer", Importance.HIGH, VALUE_SERIALIZER_CLASS_DOC);
+                                .define(VALUE_SERIALIZER_CLASS_CONFIG, Type.CLASS, "org.apache.kafka.clients.producer.ByteArraySerializer", Importance.HIGH, VALUE_SERIALIZER_CLASS_DOC)
+                                .define(SECURITY_PROTOCOL,
+                                        Type.STRING,
+                                        "PLAINTEXT",
+                                        in(Arrays.asList("PLAINTEXT")),
+                                        Importance.MEDIUM,
+                                        SECURITY_PROTOCOL_DOC);
     }
 
     ProducerConfig(Map props) {
diff --git a/clients/src/main/java/org/apache/kafka/common/protocol/Protocol.java b/clients/src/main/java/org/apache/kafka/common/protocol/Protocol.java
index 7517b87..29ce7af 100644
--- a/clients/src/main/java/org/apache/kafka/common/protocol/Protocol.java
+++ b/clients/src/main/java/org/apache/kafka/common/protocol/Protocol.java
@@ -47,11 +47,17 @@ public class Protocol {
                                                                  new ArrayOf(STRING),
                                                                  "An array of topics to fetch metadata for. If no topics are specified fetch metadtata for all topics."));
 
+    public static Schema METADATA_REQUEST_V1 = new Schema(new Field("topics",
+                                                                    new ArrayOf(STRING),
+                                                                    "An array of topics to fetch metadata for. If no topics are specified fetch metadata for all topics."),
+                                                          new Field("security_protocol", STRING, "The response should contain broker endpoints that correspond to this protocol"));
+
     public static Schema BROKER = new Schema(new Field("node_id", INT32, "The broker id."),
                                              new Field("host", STRING, "The hostname of the broker."),
                                              new Field("port", INT32, "The port on which the broker accepts requests."));
 
-    public static Schema PARTITION_METADATA_V0 = new Schema(new Field("partition_error_code",
+
+    public static Schema PARTITION_METADATA_V0 = new Schema(new Field("partition_error_code",
                                                                       INT16,
                                                                       "The error code for the partition, if any."),
                                                             new Field("partition_id",
                                                                       INT32,
                                                                       "The id of the partition."),
@@ -76,8 +82,8 @@ public class Protocol {
                                                                "Host and port information for all brokers."),
                                                      new Field("topic_metadata", new ArrayOf(TOPIC_METADATA_V0)));
 
-    public static Schema[] METADATA_REQUEST = new Schema[] { METADATA_REQUEST_V0 };
-    public static Schema[] METADATA_RESPONSE = new Schema[] { METADATA_RESPONSE_V0 };
+    public static Schema[] METADATA_REQUEST = new Schema[] { METADATA_REQUEST_V0, METADATA_REQUEST_V1 };
+    public static Schema[] METADATA_RESPONSE = new Schema[] { METADATA_RESPONSE_V0, METADATA_RESPONSE_V0 };
 
     /* Produce api */
diff --git a/clients/src/main/java/org/apache/kafka/common/requests/MetadataRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/MetadataRequest.java
index b22ca1d..4e53ad9 100644
--- a/clients/src/main/java/org/apache/kafka/common/requests/MetadataRequest.java
+++ b/clients/src/main/java/org/apache/kafka/common/requests/MetadataRequest.java
@@ -23,16 +23,30 @@ import org.apache.kafka.common.protocol.types.Struct;
 public class MetadataRequest extends AbstractRequestResponse {
 
     public static Schema curSchema = ProtoUtils.currentRequestSchema(ApiKeys.METADATA.id);
+    public static String DEFAULT_PROTOCOL = "PLAINTEXT";
     private static String TOPICS_KEY_NAME = "topics";
+    private static String PROTOCOL_KEY_NAME = "security_protocol";
+
     private final List<String> topics;
+    private String protocol;
 
+    /* Constructor for V0 */
     public MetadataRequest(List<String> topics) {
-        super(new Struct(curSchema));
+        super(new Struct(ProtoUtils.requestSchema(ApiKeys.METADATA.id, 0)));
         struct.set(TOPICS_KEY_NAME, topics.toArray());
         this.topics = topics;
     }
 
+    /* Constructor for V1 */
+    public MetadataRequest(List<String> topics, String protocol) {
+        super(new Struct(ProtoUtils.requestSchema(ApiKeys.METADATA.id, 1)));
+        struct.set(TOPICS_KEY_NAME, topics.toArray());
+        struct.set(PROTOCOL_KEY_NAME, protocol);
+        this.topics = topics;
+        this.protocol = protocol;
+    }
+
     public MetadataRequest(Struct struct) {
         super(struct);
         Object[] topicArray = struct.getArray(TOPICS_KEY_NAME);
@@ -40,6 +54,9 @@ public class MetadataRequest extends AbstractRequestResponse {
         for (Object topicObj: topicArray) {
             topics.add((String) topicObj);
         }
+        if (struct.hasField(PROTOCOL_KEY_NAME)) {
+            protocol = struct.getString(PROTOCOL_KEY_NAME);
+        }
     }
 
     public List<String> topics() {
@@ -47,6 +64,11 @@ public class MetadataRequest extends AbstractRequestResponse {
     }
 
     public static MetadataRequest parse(ByteBuffer buffer) {
-        return new MetadataRequest(((Struct) curSchema.read(buffer)));
+        return new MetadataRequest(((Struct) curSchema.read(buffer)));
+    }
+
+    public static MetadataRequest parse(ByteBuffer buffer, int versionId) {
+        Schema schema = ProtoUtils.requestSchema(ApiKeys.METADATA.id, versionId);
+        return new MetadataRequest(((Struct) schema.read(buffer)));
     }
 }
diff --git a/clients/src/test/java/org/apache/kafka/clients/NetworkClientTest.java b/clients/src/test/java/org/apache/kafka/clients/NetworkClientTest.java
index 1a55242..749fb47 100644
--- a/clients/src/test/java/org/apache/kafka/clients/NetworkClientTest.java
+++ b/clients/src/test/java/org/apache/kafka/clients/NetworkClientTest.java
@@ -37,7 +37,7 @@ public class NetworkClientTest {
     private int nodeId = 1;
     private Cluster cluster = TestUtils.singletonCluster("test", nodeId);
    private Node node = cluster.nodes().get(0);
-    private NetworkClient client = new NetworkClient(selector, metadata, "mock", Integer.MAX_VALUE, 0, 64 * 1024, 64 * 1024);
+    private NetworkClient client = new NetworkClient(selector, metadata, "mock", Integer.MAX_VALUE, 0, 64 * 1024, 64 * 1024, "PLAINTEXT");
 
     @Before
     public void setup() {
diff --git a/clients/src/test/java/org/apache/kafka/common/requests/RequestResponseTest.java b/clients/src/test/java/org/apache/kafka/common/requests/RequestResponseTest.java
index df37fc6..58b5c3e 100644
--- a/clients/src/test/java/org/apache/kafka/common/requests/RequestResponseTest.java
+++ b/clients/src/test/java/org/apache/kafka/common/requests/RequestResponseTest.java
@@ -124,7 +124,7 @@ public class RequestResponseTest {
     }
 
     private AbstractRequestResponse createMetadataRequest() {
-        return new MetadataRequest(Arrays.asList("topic1"));
+        return new MetadataRequest(Arrays.asList("topic1"), "PLAINTEXT");
     }
 
     private AbstractRequestResponse createMetadataResponse() {
diff --git a/core/src/main/scala/kafka/admin/AdminUtils.scala b/core/src/main/scala/kafka/admin/AdminUtils.scala
index 28b12c7..d06775c 100644
--- a/core/src/main/scala/kafka/admin/AdminUtils.scala
+++ b/core/src/main/scala/kafka/admin/AdminUtils.scala
@@ -18,7 +18,12 @@ package kafka.admin
 
 import kafka.common._
-import kafka.cluster.Broker
+import java.util.Random
+import java.util.Properties
+import kafka.api.{TopicMetadata, PartitionMetadata}
+import kafka.cluster.ProtocolType.ProtocolType
+import kafka.cluster.{BrokerEndpoint, Broker, ProtocolType}
+
 import kafka.log.LogConfig
 import kafka.utils.{Logging, ZkUtils, Json}
 import kafka.api.{TopicMetadata, PartitionMetadata}
@@ -287,6 +292,8 @@ object AdminUtils extends Logging {
     topics.map(topic => fetchTopicMetadataFromZk(topic, zkClient, cachedBrokerInfo))
   }
 
+
+
   private def fetchTopicMetadataFromZk(topic: String, zkClient: ZkClient, cachedBrokerInfo: mutable.HashMap[Int, Broker]): TopicMetadata = {
     if(ZkUtils.pathExists(zkClient, ZkUtils.getTopicPath(topic))) {
       val topicPartitionAssignment = ZkUtils.getPartitionAssignmentForTopics(zkClient, List(topic)).get(topic).get
diff --git a/core/src/main/scala/kafka/admin/TopicCommand.scala b/core/src/main/scala/kafka/admin/TopicCommand.scala
index 285c033..17e50bf 100644
--- a/core/src/main/scala/kafka/admin/TopicCommand.scala
+++ b/core/src/main/scala/kafka/admin/TopicCommand.scala
@@ -196,9 +196,7 @@ object TopicCommand {
       }
     }
   }
-
-  def formatBroker(broker: Broker) = broker.id + " (" + formatAddress(broker.host, broker.port) + ")"
-
+
   def parseTopicConfigsToBeAdded(opts: TopicCommandOptions): Properties = {
     val configsToBeAdded = opts.options.valuesOf(opts.configOpt).map(_.split("""\s*=\s*"""))
     require(configsToBeAdded.forall(config => config.length == 2),
diff --git a/core/src/main/scala/kafka/api/ConsumerMetadataRequest.scala b/core/src/main/scala/kafka/api/ConsumerMetadataRequest.scala
index 6d00ed0..6579b60 100644
--- a/core/src/main/scala/kafka/api/ConsumerMetadataRequest.scala
+++ b/core/src/main/scala/kafka/api/ConsumerMetadataRequest.scala
@@ -18,12 +18,14 @@ package kafka.api
 
 import java.nio.ByteBuffer
+import kafka.cluster.ProtocolType
+import kafka.cluster.ProtocolType.ProtocolType
 import kafka.network.{BoundedByteBufferSend, RequestChannel}
 import kafka.network.RequestChannel.Response
 import kafka.common.ErrorMapping
 
 object ConsumerMetadataRequest {
-  val CurrentVersion = 0.shortValue
+  val CurrentVersion = 1.shortValue
   val DefaultClientId = ""
 
   def readFrom(buffer: ByteBuffer) = {
@@ -31,10 +33,10 @@ object ConsumerMetadataRequest {
     val versionId = buffer.getShort
     val correlationId = buffer.getInt
     val clientId = ApiUtils.readShortString(buffer)
-    // request
     val group = ApiUtils.readShortString(buffer)
-    ConsumerMetadataRequest(group, versionId, correlationId, clientId)
+    val securityProtocol = ProtocolType.withName(ApiUtils.readShortString(buffer))
+    ConsumerMetadataRequest(group, versionId, correlationId, clientId, securityProtocol)
   }
 }
 
@@ -42,14 +44,16 @@ object ConsumerMetadataRequest {
 case class ConsumerMetadataRequest(group: String,
                                    versionId: Short = ConsumerMetadataRequest.CurrentVersion,
                                    correlationId: Int = 0,
-                                   clientId: String = ConsumerMetadataRequest.DefaultClientId)
+                                   clientId: String = ConsumerMetadataRequest.DefaultClientId,
+                                   securityProtocol: ProtocolType = ProtocolType.PLAINTEXT)
   extends RequestOrResponse(Some(RequestKeys.ConsumerMetadataKey)) {
 
   def sizeInBytes =
     2 + /* versionId */
     4 + /* correlationId */
     ApiUtils.shortStringLength(clientId) +
-    ApiUtils.shortStringLength(group)
+    ApiUtils.shortStringLength(group) +
+    ApiUtils.shortStringLength(securityProtocol.toString)
 
   def writeTo(buffer: ByteBuffer) {
     // envelope
@@ -59,6 +63,7 @@ case class ConsumerMetadataRequest(group: String,
 
     // consumer metadata request
     ApiUtils.writeShortString(buffer, group)
+    ApiUtils.writeShortString(buffer, securityProtocol.toString)
   }
 
   override def handleError(e: Throwable, requestChannel: RequestChannel, request: RequestChannel.Request): Unit = {
@@ -74,6 +79,7 @@ case class ConsumerMetadataRequest(group: String,
     consumerMetadataRequest.append("; CorrelationId: " + correlationId)
     consumerMetadataRequest.append("; ClientId: " + clientId)
     consumerMetadataRequest.append("; Group: " + group)
+    consumerMetadataRequest.append("; SecurityProtocol: " + securityProtocol.toString)
     consumerMetadataRequest.toString()
   }
 }
\ No newline at end of file
diff --git a/core/src/main/scala/kafka/api/ConsumerMetadataResponse.scala b/core/src/main/scala/kafka/api/ConsumerMetadataResponse.scala
index 84f6017..d575232 100644
--- a/core/src/main/scala/kafka/api/ConsumerMetadataResponse.scala
+++ b/core/src/main/scala/kafka/api/ConsumerMetadataResponse.scala
@@ -18,18 +18,18 @@ package kafka.api
 
 import java.nio.ByteBuffer
-import kafka.cluster.Broker
+import kafka.cluster.{ProtocolType, BrokerEndpoint, Broker}
 import kafka.common.ErrorMapping
 
 object ConsumerMetadataResponse {
   val CurrentVersion = 0
 
-  private val NoBrokerOpt = Some(Broker(id = -1, host = "", port = -1))
+  private val NoBrokerOpt = Some(BrokerEndpoint(id = -1, host = "", port = -1, protocolType = ProtocolType.PLAINTEXT))
 
   def readFrom(buffer: ByteBuffer) = {
     val correlationId = buffer.getInt
     val errorCode = buffer.getShort
-    val broker = Broker.readFrom(buffer)
+    val broker = BrokerEndpoint.readFrom(buffer)
     val coordinatorOpt = if (errorCode == ErrorMapping.NoError)
       Some(broker)
     else
@@ -40,7 +40,7 @@ object ConsumerMetadataResponse {
 }
 
-case class ConsumerMetadataResponse (coordinatorOpt: Option[Broker], errorCode: Short,
-                                     correlationId: Int = 0)
+case class ConsumerMetadataResponse (coordinatorOpt: Option[BrokerEndpoint], errorCode: Short, correlationId: Int = 0)
   extends RequestOrResponse() {
 
   def sizeInBytes =
diff --git a/core/src/main/scala/kafka/api/TopicMetadata.scala b/core/src/main/scala/kafka/api/TopicMetadata.scala
index 0190076..b1ff35e 100644
--- a/core/src/main/scala/kafka/api/TopicMetadata.scala
+++ b/core/src/main/scala/kafka/api/TopicMetadata.scala
@@ -17,7 +17,7 @@
 package kafka.api
 
-import kafka.cluster.Broker
+import kafka.cluster.{BrokerEndpoint, Broker}
 import java.nio.ByteBuffer
 import kafka.api.ApiUtils._
 import kafka.utils.Logging
@@ -110,10 +110,23 @@ object PartitionMetadata {
 }
 
 case class PartitionMetadata(partitionId: Int,
-                             val leader: Option[Broker],
-                             replicas: Seq[Broker],
+                             val leader: Option[Broker],
+                             replicas: Seq[Broker],
                              isr: Seq[Broker] = Seq.empty,
                              errorCode: Short = ErrorMapping.NoError) extends Logging {
+
+  /*def this(partitionId: Int,
+           leader: Option[BrokerEndPoint],
+           replicas: Seq[BrokerEndPoint],
+           isr: Seq[BrokerEndPoint] = Seq.empty,
+           errorCode: Short = ErrorMapping.NoError) = {
+    this(partitionId,
+         Some(if (leader.isDefined) Broker.createBroker(leader.get) else Broker.noBroker()),
+         replicas.map(endpoint => Broker.createBroker(endpoint)),
+         isr.map(endpoint => Broker.createBroker(endpoint)),
+         errorCode)
+  }*/
+
   def sizeInBytes: Int = {
     2 /* error code */ +
     4 /* partition id */ +
@@ -142,14 +155,13 @@ case class PartitionMetadata(partitionId: Int,
   override def toString(): String = {
     val partitionMetadataString = new StringBuilder
     partitionMetadataString.append("\tpartition " + partitionId)
-    partitionMetadataString.append("\tleader: " + (if(leader.isDefined) formatBroker(leader.get) else "none"))
-    partitionMetadataString.append("\treplicas: " + replicas.map(formatBroker).mkString(","))
-    partitionMetadataString.append("\tisr: " + isr.map(formatBroker).mkString(","))
+    partitionMetadataString.append("\tleader: " + (if(leader.isDefined) leader.get.toString else "none"))
+    partitionMetadataString.append("\treplicas: " + replicas.mkString(","))
+    partitionMetadataString.append("\tisr: " + isr.mkString(","))
     partitionMetadataString.append("\tisUnderReplicated: %s".format(if(isr.size < replicas.size) "true" else "false"))
     partitionMetadataString.toString()
   }
 
-  private def formatBroker(broker: Broker) = broker.id + " (" + formatAddress(broker.host, broker.port) + ")"
 }
diff --git a/core/src/main/scala/kafka/api/TopicMetadataRequest.scala b/core/src/main/scala/kafka/api/TopicMetadataRequest.scala
index 7dca09c..4f1ad1a 100644
--- a/core/src/main/scala/kafka/api/TopicMetadataRequest.scala
+++ b/core/src/main/scala/kafka/api/TopicMetadataRequest.scala
@@ -19,6 +19,9 @@ package kafka.api
 
 import java.nio.ByteBuffer
 import kafka.api.ApiUtils._
+import kafka.cluster.ProtocolType
+import kafka.cluster.ProtocolType.ProtocolType
+import kafka.cluster.ProtocolType.ProtocolType
 import collection.mutable.ListBuffer
 import kafka.network.{BoundedByteBufferSend, RequestChannel}
 import kafka.common.ErrorMapping
@@ -26,34 +29,47 @@ import kafka.network.RequestChannel.Response
 import kafka.utils.Logging
 
 object TopicMetadataRequest extends Logging {
-  val CurrentVersion = 0.shortValue
+  val CurrentVersion = 1.shortValue
   val DefaultClientId = ""
 
   /**
    * TopicMetadataRequest has the following format -
-   * number of topics (4 bytes) list of topics (2 bytes + topic.length per topic) detailedMetadata (2 bytes) timestamp (8 bytes) count (4 bytes)
+   * number of topics (4 bytes)
+   * list of topics (2 bytes + topic.length per topic)
+   * security protocol (2 bytes + protocol.length, v1 and above; written after the topic list)
+   * detailedMetadata (2 bytes)
+   * timestamp (8 bytes)
+   * count (4 bytes)
    */
   def readFrom(buffer: ByteBuffer): TopicMetadataRequest = {
     val versionId = buffer.getShort
+    assert(versionId == 0 || versionId == 1,
+      "Version " + versionId + " is invalid for TopicMetadataRequest. Valid versions are 0 or 1.")
     val correlationId = buffer.getInt
     val clientId = readShortString(buffer)
     val numTopics = readIntInRange(buffer, "number of topics", (0, Int.MaxValue))
     val topics = new ListBuffer[String]()
     for(i <- 0 until numTopics)
       topics += readShortString(buffer)
-    new TopicMetadataRequest(versionId, correlationId, clientId, topics.toList)
+
+    var securityProtocol = org.apache.kafka.common.requests.MetadataRequest.DEFAULT_PROTOCOL
+    if (versionId == 1) {
+      securityProtocol = readShortString(buffer)
+    }
+    new TopicMetadataRequest(versionId, correlationId, clientId, ProtocolType.withName(securityProtocol), topics.toList)
   }
 }
 
 case class TopicMetadataRequest(val versionId: Short,
                                 val correlationId: Int,
                                 val clientId: String,
+                                val securityProtocol: ProtocolType,
                                 val topics: Seq[String])
  extends RequestOrResponse(Some(RequestKeys.MetadataKey)){
 
   def this(topics: Seq[String], correlationId: Int) =
-    this(TopicMetadataRequest.CurrentVersion, correlationId, TopicMetadataRequest.DefaultClientId, topics)
+    this(TopicMetadataRequest.CurrentVersion, correlationId, TopicMetadataRequest.DefaultClientId, ProtocolType.PLAINTEXT, topics)
 
   def writeTo(buffer: ByteBuffer) {
     buffer.putShort(versionId)
@@ -61,6 +77,7 @@ case class TopicMetadataRequest(val versionId: Short,
     writeShortString(buffer, clientId)
     buffer.putInt(topics.size)
     topics.foreach(topic => writeShortString(buffer, topic))
+    writeShortString(buffer, securityProtocol.toString)
   }
 
   def sizeInBytes(): Int = {
@@ -68,7 +85,8 @@ case class TopicMetadataRequest(val versionId: Short,
     4 + /* correlation id */
     shortStringLength(clientId) + /* client id */
     4 + /* number of topics */
-    topics.foldLeft(0)(_ + shortStringLength(_)) /* topics */
+    topics.foldLeft(0)(_ + shortStringLength(_)) + /* topics */
+    shortStringLength(securityProtocol.toString) /* security protocol */
   }
 
   override def toString(): String = {
@@ -89,6 +107,7 @@ case class TopicMetadataRequest(val versionId: Short,
     topicMetadataRequest.append("; Version: " + versionId)
     topicMetadataRequest.append("; CorrelationId: " + correlationId)
     topicMetadataRequest.append("; ClientId: " + clientId)
+    topicMetadataRequest.append("; SecurityProtocol: " + securityProtocol.toString)
     if(details)
       topicMetadataRequest.append("; Topics: " + topics.mkString(","))
     topicMetadataRequest.toString()
diff --git a/core/src/main/scala/kafka/api/TopicMetadataResponse.scala b/core/src/main/scala/kafka/api/TopicMetadataResponse.scala
index 92ac4e6..db9f88e 100644
--- a/core/src/main/scala/kafka/api/TopicMetadataResponse.scala
+++ b/core/src/main/scala/kafka/api/TopicMetadataResponse.scala
@@ -17,7 +17,7 @@
 package kafka.api
 
-import kafka.cluster.Broker
+import kafka.cluster.{ProtocolType, BrokerEndpoint, Broker}
 import java.nio.ByteBuffer
 
 object TopicMetadataResponse {
@@ -25,22 +25,28 @@ object TopicMetadataResponse {
   def readFrom(buffer: ByteBuffer): TopicMetadataResponse = {
     val correlationId = buffer.getInt
     val brokerCount = buffer.getInt
-    val brokers = (0 until brokerCount).map(_ => Broker.readFrom(buffer))
-    val brokerMap = brokers.map(b => (b.id, b)).toMap
+    val brokers = (0 until brokerCount).map(_ => BrokerEndpoint.readFrom(buffer))
+    /* The broker list we are using for the TopicMetadataResponse is a collection of end-points (so clients can use them for connections),
+     * but partitionMetadata has multiple use-cases, some of which seem to require actual brokers,
+     * so we convert endpoints to broker objects here to preserve the partitionMetadata as is.
+     * TODO: check if we can use endpoints in partitionMetadata */
+    val brokerMap = brokers.map(b => (b.id, Broker.createBroker(b))).toMap
     val topicCount = buffer.getInt
     val topicsMetadata = (0 until topicCount).map(_ => TopicMetadata.readFrom(buffer, brokerMap))
     new TopicMetadataResponse(brokers, topicsMetadata, correlationId)
   }
 }
 
-case class TopicMetadataResponse(brokers: Seq[Broker],
+case class TopicMetadataResponse(brokers: Seq[BrokerEndpoint],
                                  topicsMetadata: Seq[TopicMetadata],
                                  correlationId: Int)
     extends RequestOrResponse() {
+
   val sizeInBytes: Int = {
     4 + 4 + brokers.map(_.sizeInBytes).sum + 4 + topicsMetadata.map(_.sizeInBytes).sum
   }
+
   def writeTo(buffer: ByteBuffer) {
     buffer.putInt(correlationId)
     /* brokers */
diff --git a/core/src/main/scala/kafka/client/ClientUtils.scala b/core/src/main/scala/kafka/client/ClientUtils.scala
index ebba87f..5acc878 100644
--- a/core/src/main/scala/kafka/client/ClientUtils.scala
+++ b/core/src/main/scala/kafka/client/ClientUtils.scala
@@ -16,7 +16,9 @@
  */
 package kafka.client
 
-import scala.collection._
+import kafka.cluster.ProtocolType.ProtocolType
+
+import scala.collection._
 import kafka.cluster._
 import kafka.api._
 import kafka.producer._
@@ -42,10 +44,10 @@ object ClientUtils extends Logging{
    * @param producerConfig The producer's config
    * @return topic metadata response
    */
-  def fetchTopicMetadata(topics: Set[String], brokers: Seq[Broker], producerConfig: ProducerConfig, correlationId: Int): TopicMetadataResponse = {
+  def fetchTopicMetadata(topics: Set[String], brokers: Seq[BrokerEndpoint], producerConfig: ProducerConfig, correlationId: Int): TopicMetadataResponse = {
     var fetchMetaDataSucceeded: Boolean = false
     var i: Int = 0
-    val topicMetadataRequest = new TopicMetadataRequest(TopicMetadataRequest.CurrentVersion, correlationId, producerConfig.clientId, topics.toSeq)
+    val topicMetadataRequest = new TopicMetadataRequest(TopicMetadataRequest.CurrentVersion, correlationId, producerConfig.clientId, producerConfig.securityProtocol, topics.toSeq)
     var topicMetadataResponse: TopicMetadataResponse = null
     var t: Throwable = null
     // shuffle the list of brokers before sending metadata requests so that most requests don't get routed to the
@@ -83,7 +85,7 @@ object ClientUtils extends Logging{
    * @param clientId The client's identifier
    * @return topic metadata response
   */
-  def fetchTopicMetadata(topics: Set[String], brokers: Seq[Broker], clientId: String, timeoutMs: Int,
+  def fetchTopicMetadata(topics: Set[String], brokers: Seq[BrokerEndpoint], clientId: String, timeoutMs: Int,
                          correlationId: Int = 0): TopicMetadataResponse = {
     val props = new Properties()
     props.put("metadata.broker.list", brokers.map(_.connectionString).mkString(","))
@@ -96,22 +98,22 @@ object ClientUtils extends Logging{
   /**
    * Parse a list of broker urls in the form host1:port1, host2:port2, ...
   */
-  def parseBrokerList(brokerListStr: String): Seq[Broker] = {
+  def parseBrokerList(brokerListStr: String, protocolType: ProtocolType = ProtocolType.PLAINTEXT): Seq[BrokerEndpoint] = {
     val brokersStr = Utils.parseCsvList(brokerListStr)
     brokersStr.zipWithIndex.map { case (address, brokerId) =>
-      new Broker(brokerId, getHost(address), getPort(address))
+      BrokerEndpoint.createBrokerEndPoint(brokerId, protocolType.toString + "://" + address)
     }
   }
 
   /**
    * Creates a blocking channel to a random broker
    */
-  def channelToAnyBroker(zkClient: ZkClient, socketTimeoutMs: Int = 3000) : BlockingChannel = {
+  def channelToAnyBroker(zkClient: ZkClient, protocolType: ProtocolType, socketTimeoutMs: Int = 3000) : BlockingChannel = {
     var channel: BlockingChannel = null
     var connected = false
     while (!connected) {
-      val allBrokers = getAllBrokersInCluster(zkClient)
+      val allBrokers = getAllBrokerEndPointsForChannel(zkClient, protocolType)
       Random.shuffle(allBrokers).find { broker =>
         trace("Connecting to broker %s:%d.".format(broker.host, broker.port))
         try {
@@ -136,19 +138,19 @@ object ClientUtils extends Logging{
   /**
    * Creates a blocking channel to the offset manager of the given group
    */
-  def channelToOffsetManager(group: String, zkClient: ZkClient, socketTimeoutMs: Int = 3000, retryBackOffMs: Int = 1000) = {
-    var queryChannel = channelToAnyBroker(zkClient)
+  def channelToOffsetManager(group: String, zkClient: ZkClient, socketTimeoutMs: Int = 3000, retryBackOffMs: Int = 1000, protocolType: ProtocolType = ProtocolType.PLAINTEXT) = {
+    var queryChannel = channelToAnyBroker(zkClient, protocolType)
 
     var offsetManagerChannelOpt: Option[BlockingChannel] = None
 
     while (!offsetManagerChannelOpt.isDefined) {
 
-      var coordinatorOpt: Option[Broker] = None
+      var coordinatorOpt: Option[BrokerEndpoint] = None
 
       while (!coordinatorOpt.isDefined) {
         try {
           if (!queryChannel.isConnected)
-            queryChannel = channelToAnyBroker(zkClient)
+            queryChannel = channelToAnyBroker(zkClient, protocolType)
           debug("Querying %s:%d to locate offset manager for %s.".format(queryChannel.host, queryChannel.port, group))
           queryChannel.send(ConsumerMetadataRequest(group))
           val response = queryChannel.receive()
diff --git a/core/src/main/scala/kafka/cluster/Broker.scala b/core/src/main/scala/kafka/cluster/Broker.scala
index 0060add..684bc32 100644
--- a/core/src/main/scala/kafka/cluster/Broker.scala
+++ b/core/src/main/scala/kafka/cluster/Broker.scala
@@ -18,17 +18,36 @@
 package kafka.cluster
 
 import kafka.utils.Utils._
-import kafka.utils.Json
-import kafka.api.ApiUtils._
+import kafka.utils.{Utils, Json}
 import java.nio.ByteBuffer
-import kafka.common.{KafkaException, BrokerNotAvailableException}
-import org.apache.kafka.common.utils.Utils._
+import kafka.common.{BrokerEndPointNotAvailableException, KafkaException, BrokerNotAvailableException}
+import kafka.cluster.ProtocolType._
 
 /**
  * A Kafka broker.
+ * A broker has an id, a host and a collection of end-points.
+ * Each end-point is (port, protocolType); currently the only protocol type is PLAINTEXT,
+ * but we will add SSL and Kerberos in the future.
 */
object Broker {

+  /**
+   * Create a broker object from id and JSON string
+   * @param id
+   * @param brokerInfoString
+   * The current JSON schema for a broker is:
+   * {"version":1,
+   *  "host":"localhost",
+   *  "jmx_port":9999,
+   *  "timestamp":2233345666,
+   *  "endpoints": [
+   *    {"port":9092,
+   *     "protocolType":"PLAINTEXT"},
+   *    {"port":9093,
+   *     "protocolType":"SSL"}]}
+   * @return
+   */
   def createBroker(id: Int, brokerInfoString: String): Broker = {
     if(brokerInfoString == null)
       throw new BrokerNotAvailableException("Broker id %s does not exist".format(id))
@@ -38,7 +57,8 @@ object Broker {
         val brokerInfo = m.asInstanceOf[Map[String, Any]]
         val host = brokerInfo.get("host").get.asInstanceOf[String]
         val port = brokerInfo.get("port").get.asInstanceOf[Int]
-        new Broker(id, host, port)
+        val endPoints = brokerInfo.get("endpoints").get.asInstanceOf[String]
+        new Broker(id, Utils.listenerListToEndPoints(endPoints))
       case None =>
         throw new BrokerNotAvailableException("Broker id %d does not exist".format(id))
     }
@@ -47,36 +67,86 @@ object Broker {
     }
   }
 
+  def createBroker(endPoint: BrokerEndpoint) = {
+    Broker(endPoint.id, List(EndPoint(endPoint.host, endPoint.port, endPoint.protocolType)))
+  }
+
+  def noBroker() = {
+    Broker(-1, List())
+  }
+
+  /**
+   * @param buffer containing serialized broker
+   * current serialization is:
+   * id (int), number of endpoints (int), serialized endpoints
+   * @return broker object
+   */
   def readFrom(buffer: ByteBuffer): Broker = {
     val id = buffer.getInt
-    val host = readShortString(buffer)
-    val port = buffer.getInt
-    new Broker(id, host, port)
+    val numEndpoints = buffer.getInt
+
+    val endpoints = List.range(0, numEndpoints).map(i => EndPoint.readFrom(buffer))
+
+    new Broker(id, endpoints)
   }
 }
 
-case class Broker(id: Int, host: String, port: Int) {
-
-  override def toString: String = "id:" + id + ",host:" + host + ",port:" + port
+case class Broker(id: Int, endPoints: Seq[EndPoint]) {
+
+  override def toString: String = id + " : " + endPoints.mkString("(", ",", ")")
+
+  def this(id: Int, host: String, port: Int) = {
+    this(id, List(EndPoint(host, port, ProtocolType.PLAINTEXT)))
+  }
 
-  def connectionString: String = formatAddress(host, port)
 
   def writeTo(buffer: ByteBuffer) {
     buffer.putInt(id)
-    writeShortString(buffer, host)
-    buffer.putInt(port)
+    buffer.putInt(endPoints.size)
+    for(endpoint <- endPoints) {
+      endpoint.writeTo(buffer)
+    }
   }
 
-  def sizeInBytes: Int = shortStringLength(host) /* host name */ + 4 /* port */ + 4 /* broker id*/
+  def sizeInBytes: Int =
+    4 + /* broker id */
+    4 + /* number of endPoints */
+    endPoints.map(_.sizeInBytes).sum /* end points */
 
+  def supportsChannel(protocolType: ProtocolType): Boolean = {
+    endPoints.map((endpoint) => (endpoint.protocolType)).contains(protocolType)
+  }
+
+  def getBrokerEndPoint(protocolType: ProtocolType): BrokerEndpoint = {
+    val endpoint = endPoints.map((endpoint) => (endpoint.protocolType, endpoint)).toMap.get(protocolType)
+    endpoint match {
+      case Some(endpoint) => new BrokerEndpoint(id, endpoint.host, endpoint.port, endpoint.protocolType)
+      case None =>
+        throw new BrokerEndPointNotAvailableException("End point %s not found for broker %d".format(protocolType, id))
+    }
+  }
+
+  /* TODO: Unit test! */
   override def equals(obj: Any): Boolean = {
     obj match {
       case null => false
-      case n: Broker => id == n.id && host == n.host && port == n.port
+      // Scala compares lists element by element
+      case n: Broker => id == n.id && endPoints == n.endPoints
       case _ => false
     }
   }
-
-  override def hashCode(): Int = hashcode(id, host, port)
+
+  /* TODO: Unit test! */
+  override def hashCode(): Int = hashcode(id, endPoints)
 }
diff --git a/core/src/main/scala/kafka/cluster/BrokerEndPoint.scala b/core/src/main/scala/kafka/cluster/BrokerEndPoint.scala
new file mode 100644
index 0000000..bf6f406
--- /dev/null
+++ b/core/src/main/scala/kafka/cluster/BrokerEndPoint.scala
@@ -0,0 +1,59 @@
+package kafka.cluster
+
+import java.nio.ByteBuffer
+
+import kafka.api.ApiUtils._
+import org.apache.kafka.common.utils.Utils._
+import kafka.cluster.ProtocolType._
+
+object BrokerEndpoint {
+  def createBrokerEndPoint(brokerId: Int, connectionString: String): BrokerEndpoint = {
+    val endPoint = EndPoint.createEndPoint(connectionString)
+    new BrokerEndpoint(brokerId, endPoint.host, endPoint.port.toInt, endPoint.protocolType)
+  }
+
+  /**
+   * BrokerEndpoint includes the protocol type, to allow locating the right endpoint in lists,
+   * but we don't serialize or de-serialize it.
+   * This allows us to keep the wire protocol with the clients unchanged where the protocol is not needed.
+   * @param buffer
+   * @return
+   */
+  def readFrom(buffer: ByteBuffer): BrokerEndpoint = {
+    val brokerId = buffer.getInt()
+    val host = readShortString(buffer)
+    val port = buffer.getInt()
+    BrokerEndpoint(brokerId, host, port, null)
+  }
+}
+
+// Utility class, representing a particular method of connecting to a broker.
+// Mostly to be used by clients.
+// This is not a broker and is not stored in ZooKeeper.
+case class BrokerEndpoint(id: Int, host: String, port: Int, protocolType: ProtocolType) {
+
+  def connectionString(): String = formatEndpoint(host, port, protocolType)
+
+  /**
+   * Formats broker endpoint as "protocol://host:port" address string,
+   * surrounding IPv6 addresses with braces '[', ']'
+   * @param host hostname
+   * @param port port number
+   * @param protocolType the endpoint's protocol type (currently PLAINTEXT; later SSL, KRB)
+   * @return address string
+   */
+  def formatEndpoint(host: String, port: Integer, protocolType: ProtocolType): String = {
+    return protocolType + "://" + formatAddress(host, port)
+  }
+
+  def writeTo(buffer: ByteBuffer): Unit = {
+    buffer.putInt(id)
+    writeShortString(buffer, host)
+    buffer.putInt(port)
+  }
+
+  def sizeInBytes: Int =
+    4 + /* broker Id */
+    4 + /* port */
+    shortStringLength(host)
+}
diff --git a/core/src/main/scala/kafka/cluster/EndPoint.scala b/core/src/main/scala/kafka/cluster/EndPoint.scala
new file mode 100644
index 0000000..085b333
--- /dev/null
+++ b/core/src/main/scala/kafka/cluster/EndPoint.scala
@@ -0,0 +1,58 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package kafka.cluster
+
+import java.nio.ByteBuffer
+
+import kafka.api.ApiUtils._
+import kafka.common.KafkaException
+import kafka.cluster.ProtocolType._
+
+object EndPoint {
+
+  def readFrom(buffer: ByteBuffer): EndPoint = {
+    val port = buffer.getInt()
+    val host = readShortString(buffer)
+    val channel = readShortString(buffer)
+    EndPoint(host, port, ProtocolType.withName(channel))
+  }
+
+  def createEndPoint(connectionString: String): EndPoint = {
+    val uriParseExp = """^(.*)://([0-9a-z\-.]+):([0-9]+)""".r
+    connectionString match {
+      case uriParseExp(protocol, host, port) => new EndPoint(host, port.toInt, ProtocolType.withName(protocol))
+      case _ => throw new KafkaException("Unable to parse " + connectionString + " to a broker endpoint")
+    }
+  }
+}
+
+case class EndPoint(host: String, port: Int, protocolType: ProtocolType) {
+
+  override def toString: String = protocolType + "://" + host + ":" + port
+
+  def writeTo(buffer: ByteBuffer): Unit = {
+    buffer.putInt(port)
+    writeShortString(buffer, host)
+    writeShortString(buffer, protocolType.toString)
+  }
+
+  def sizeInBytes: Int =
+    4 + /* port */
+    shortStringLength(host) +
+    shortStringLength(protocolType.toString)
+}
diff --git a/core/src/main/scala/kafka/cluster/ProtocolType.scala b/core/src/main/scala/kafka/cluster/ProtocolType.scala
new file mode 100644
index 0000000..f4e6bc3
--- /dev/null
+++ b/core/src/main/scala/kafka/cluster/ProtocolType.scala
@@ -0,0 +1,26 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package kafka.cluster
+
+
+object ProtocolType extends Enumeration {
+
+  type ProtocolType = Value
+  val PLAINTEXT = Value
+}
+
diff --git a/core/src/main/scala/kafka/common/BrokerEndPointNotAvailableException.scala b/core/src/main/scala/kafka/common/BrokerEndPointNotAvailableException.scala
new file mode 100644
index 0000000..455d8c6
--- /dev/null
+++ b/core/src/main/scala/kafka/common/BrokerEndPointNotAvailableException.scala
@@ -0,0 +1,22 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package kafka.common + +class BrokerEndPointNotAvailableException(message: String) extends RuntimeException(message) { + def this() = this(null) +} diff --git a/core/src/main/scala/kafka/consumer/ConsumerConfig.scala b/core/src/main/scala/kafka/consumer/ConsumerConfig.scala index 9ebbee6..fe89a30 100644 --- a/core/src/main/scala/kafka/consumer/ConsumerConfig.scala +++ b/core/src/main/scala/kafka/consumer/ConsumerConfig.scala @@ -19,6 +19,7 @@ package kafka.consumer import java.util.Properties import kafka.api.OffsetRequest +import kafka.cluster.ProtocolType import kafka.utils._ import kafka.common.{InvalidConfigException, Config} @@ -45,6 +46,7 @@ object ConsumerConfig extends Config { val OffsetsChannelSocketTimeoutMs = 10000 val OffsetsCommitMaxRetries = 5 val OffsetsStorage = "zookeeper" + val SecurityProtocol = "PLAINTEXT" val MirrorTopicsWhitelistProp = "mirror.topics.whitelist" val MirrorTopicsBlacklistProp = "mirror.topics.blacklist" @@ -180,6 +182,9 @@ class ConsumerConfig private (val props: VerifiableProperties) extends ZKConfig( /** Select a strategy for assigning partitions to consumer streams. Possible values: range, roundrobin */ val partitionAssignmentStrategy = props.getString("partition.assignment.strategy", DefaultPartitionAssignmentStrategy) + + /* plaintext or SSL */ + val securityProtocol = ProtocolType.withName(props.getString("security.protocol", "PLAINTEXT")) validate(this) } diff --git a/core/src/main/scala/kafka/consumer/ConsumerFetcherManager.scala b/core/src/main/scala/kafka/consumer/ConsumerFetcherManager.scala index b9e2bea..c6700d2 100644 --- a/core/src/main/scala/kafka/consumer/ConsumerFetcherManager.scala +++ b/core/src/main/scala/kafka/consumer/ConsumerFetcherManager.scala @@ -19,7 +19,7 @@ package kafka.consumer import org.I0Itec.zkclient.ZkClient import kafka.server.{BrokerAndInitialOffset, AbstractFetcherThread, AbstractFetcherManager} -import kafka.cluster.{Cluster, Broker} +import kafka.cluster.{BrokerEndpoint, Cluster, Broker} import scala.collection.immutable import scala.collection.Map import collection.mutable.HashMap @@ -62,7 +62,7 @@ class ConsumerFetcherManager(private val consumerIdString: String, } trace("Partitions without leader %s".format(noLeaderPartitionSet)) - val brokers = getAllBrokersInCluster(zkClient) + val brokers = getAllBrokerEndPointsForChannel(zkClient,config.securityProtocol) val topicsMetadata = ClientUtils.fetchTopicMetadata(noLeaderPartitionSet.map(m => m.topic).toSet, brokers, config.clientId, @@ -117,7 +117,7 @@ class ConsumerFetcherManager(private val consumerIdString: String, override def createFetcherThread(fetcherId: Int, sourceBroker: Broker): AbstractFetcherThread = { new ConsumerFetcherThread( "ConsumerFetcherThread-%s-%d-%d".format(consumerIdString, fetcherId, sourceBroker.id), - config, sourceBroker, partitionMap, this) + config, sourceBroker.getBrokerEndPoint(config.securityProtocol), partitionMap, this) } def startConnections(topicInfos: Iterable[PartitionTopicInfo], cluster: Cluster) { diff --git a/core/src/main/scala/kafka/consumer/ConsumerFetcherThread.scala b/core/src/main/scala/kafka/consumer/ConsumerFetcherThread.scala index ee6139c..7e3816d 100644 --- a/core/src/main/scala/kafka/consumer/ConsumerFetcherThread.scala +++ b/core/src/main/scala/kafka/consumer/ConsumerFetcherThread.scala @@ -17,7 +17,7 @@ package kafka.consumer -import kafka.cluster.Broker +import kafka.cluster.{BrokerEndpoint, Broker} import kafka.server.AbstractFetcherThread import kafka.message.ByteBufferMessageSet import 
kafka.api.{Request, OffsetRequest, FetchResponsePartitionData} @@ -26,7 +26,7 @@ import kafka.common.TopicAndPartition class ConsumerFetcherThread(name: String, val config: ConsumerConfig, - sourceBroker: Broker, + sourceBroker: BrokerEndpoint, partitionMap: Map[TopicAndPartition, PartitionTopicInfo], val consumerFetcherManager: ConsumerFetcherManager) extends AbstractFetcherThread(name = name, diff --git a/core/src/main/scala/kafka/consumer/ZookeeperConsumerConnector.scala b/core/src/main/scala/kafka/consumer/ZookeeperConsumerConnector.scala index 191a867..bbb93e0 100644 --- a/core/src/main/scala/kafka/consumer/ZookeeperConsumerConnector.scala +++ b/core/src/main/scala/kafka/consumer/ZookeeperConsumerConnector.scala @@ -185,7 +185,8 @@ private[kafka] class ZookeeperConsumerConnector(val config: ConsumerConfig, private def ensureOffsetManagerConnected() { if (config.offsetsStorage == "kafka") { if (offsetsChannel == null || !offsetsChannel.isConnected) - offsetsChannel = ClientUtils.channelToOffsetManager(config.groupId, zkClient, config.offsetsChannelSocketTimeoutMs, config.offsetsChannelBackoffMs) + offsetsChannel = ClientUtils.channelToOffsetManager(config.groupId, zkClient, + config.offsetsChannelSocketTimeoutMs, config.offsetsChannelBackoffMs, config.securityProtocol) debug("Connected to offset manager %s:%d.".format(offsetsChannel.host, offsetsChannel.port)) } diff --git a/core/src/main/scala/kafka/controller/ControllerChannelManager.scala b/core/src/main/scala/kafka/controller/ControllerChannelManager.scala index eb492f0..6d10650 100644 --- a/core/src/main/scala/kafka/controller/ControllerChannelManager.scala +++ b/core/src/main/scala/kafka/controller/ControllerChannelManager.scala @@ -80,7 +80,8 @@ class ControllerChannelManager (private val controllerContext: ControllerContext private def addNewBroker(broker: Broker) { val messageQueue = new LinkedBlockingQueue[(RequestOrResponse, (RequestOrResponse) => Unit)](config.controllerMessageQueueSize) debug("Controller %d trying to connect to broker %d".format(config.brokerId,broker.id)) - val channel = new BlockingChannel(broker.host, broker.port, + val brokerEndPoint = broker.getBrokerEndPoint(config.securityProtocol) + val channel = new BlockingChannel(brokerEndPoint.host, brokerEndPoint.port, BlockingChannel.UseDefaultBufferSize, BlockingChannel.UseDefaultBufferSize, config.controllerSocketTimeoutMs) diff --git a/core/src/main/scala/kafka/controller/KafkaController.scala b/core/src/main/scala/kafka/controller/KafkaController.scala index 66df6d2..4fbcdd3 100644 --- a/core/src/main/scala/kafka/controller/KafkaController.scala +++ b/core/src/main/scala/kafka/controller/KafkaController.scala @@ -212,7 +212,11 @@ class KafkaController(val config : KafkaConfig, zkClient: ZkClient, val brokerSt def epoch = controllerContext.epoch - def clientId = "id_%d-host_%s-port_%d".format(config.brokerId, config.hostName, config.port) + def clientId = { + val listeners = listenerListToEndPoints(config.listeners) + val controllerListener = listeners.find(endpoint => endpoint.protocolType == config.securityProtocol) + "id_%d-host_%s-port_%d".format(config.brokerId, controllerListener.get.host, controllerListener.get.port) + } /** * On clean shutdown, the controller first determines the partitions that the diff --git a/core/src/main/scala/kafka/javaapi/ConsumerMetadataResponse.scala b/core/src/main/scala/kafka/javaapi/ConsumerMetadataResponse.scala index 1b28861..b5c4289 100644 --- a/core/src/main/scala/kafka/javaapi/ConsumerMetadataResponse.scala +++ 
b/core/src/main/scala/kafka/javaapi/ConsumerMetadataResponse.scala @@ -17,13 +17,13 @@ package kafka.javaapi -import kafka.cluster.Broker +import kafka.cluster.{BrokerEndpoint, Broker} class ConsumerMetadataResponse(private val underlying: kafka.api.ConsumerMetadataResponse) { def errorCode = underlying.errorCode - def coordinator: Broker = { + def coordinator: BrokerEndpoint = { import kafka.javaapi.Implicits._ underlying.coordinatorOpt } diff --git a/core/src/main/scala/kafka/javaapi/TopicMetadata.scala b/core/src/main/scala/kafka/javaapi/TopicMetadata.scala index f384e04..255a9f7 100644 --- a/core/src/main/scala/kafka/javaapi/TopicMetadata.scala +++ b/core/src/main/scala/kafka/javaapi/TopicMetadata.scala @@ -16,7 +16,7 @@ */ package kafka.javaapi -import kafka.cluster.Broker +import kafka.cluster.{BrokerEndpoint, Broker} import scala.collection.JavaConversions private[javaapi] object MetadataListImplicits { diff --git a/core/src/main/scala/kafka/javaapi/TopicMetadataRequest.scala b/core/src/main/scala/kafka/javaapi/TopicMetadataRequest.scala index b0b7be1..3ea12c9 100644 --- a/core/src/main/scala/kafka/javaapi/TopicMetadataRequest.scala +++ b/core/src/main/scala/kafka/javaapi/TopicMetadataRequest.scala @@ -18,6 +18,10 @@ package kafka.javaapi import kafka.api._ import java.nio.ByteBuffer +import kafka.cluster.ProtocolType +import kafka.cluster.ProtocolType.ProtocolType +import kafka.cluster.ProtocolType.ProtocolType + import scala.collection.mutable import kafka.network.{BoundedByteBufferSend, RequestChannel} import kafka.common.ErrorMapping @@ -26,19 +30,20 @@ import kafka.network.RequestChannel.Response class TopicMetadataRequest(val versionId: Short, val correlationId: Int, val clientId: String, + val securityProtocol: ProtocolType, val topics: java.util.List[String]) extends RequestOrResponse(Some(kafka.api.RequestKeys.MetadataKey)) { val underlying: kafka.api.TopicMetadataRequest = { import scala.collection.JavaConversions._ - new kafka.api.TopicMetadataRequest(versionId, correlationId, clientId, topics: mutable.Buffer[String]) + new kafka.api.TopicMetadataRequest(versionId, correlationId, clientId, securityProtocol, topics: mutable.Buffer[String]) } def this(topics: java.util.List[String]) = - this(kafka.api.TopicMetadataRequest.CurrentVersion, 0, kafka.api.TopicMetadataRequest.DefaultClientId, topics) + this(kafka.api.TopicMetadataRequest.CurrentVersion, 0, kafka.api.TopicMetadataRequest.DefaultClientId, ProtocolType.PLAINTEXT, topics) def this(topics: java.util.List[String], correlationId: Int) = - this(kafka.api.TopicMetadataRequest.CurrentVersion, correlationId, kafka.api.TopicMetadataRequest.DefaultClientId, topics) + this(kafka.api.TopicMetadataRequest.CurrentVersion, correlationId, kafka.api.TopicMetadataRequest.DefaultClientId, ProtocolType.PLAINTEXT, topics) def writeTo(buffer: ByteBuffer) = underlying.writeTo(buffer) diff --git a/core/src/main/scala/kafka/network/BlockingChannel.scala b/core/src/main/scala/kafka/network/BlockingChannel.scala index 6e2a38e..70be9c1 100644 --- a/core/src/main/scala/kafka/network/BlockingChannel.scala +++ b/core/src/main/scala/kafka/network/BlockingChannel.scala @@ -72,7 +72,10 @@ class BlockingChannel( val host: String, connectTimeoutMs)) } catch { - case e: Throwable => disconnect() + case e: Throwable => { + error(e) + disconnect() + } } } } diff --git a/core/src/main/scala/kafka/network/SocketServer.scala b/core/src/main/scala/kafka/network/SocketServer.scala index e451592..1161be0 100644 --- 
a/core/src/main/scala/kafka/network/SocketServer.scala +++ b/core/src/main/scala/kafka/network/SocketServer.scala @@ -24,7 +24,11 @@ import java.net._ import java.io._ import java.nio.channels._ +import kafka.cluster.EndPoint +import kafka.cluster.ProtocolType.ProtocolType + import scala.collection._ +import scala.collection.JavaConversions._ import kafka.common.KafkaException import kafka.metrics.KafkaMetricsGroup @@ -38,8 +42,7 @@ import com.yammer.metrics.core.{Gauge, Meter} * M Handler threads that handle requests and produce responses back to the processor threads for writing. */ class SocketServer(val brokerId: Int, - val host: String, - val port: Int, + val endpoints: Seq[EndPoint], val numProcessorThreads: Int, val maxQueuedRequests: Int, val sendBufferSize: Int, @@ -51,7 +54,7 @@ class SocketServer(val brokerId: Int, this.logIdent = "[Socket Server on Broker " + brokerId + "], " private val time = SystemTime private val processors = new Array[Processor](numProcessorThreads) - @volatile private var acceptor: Acceptor = null + @volatile private var acceptors: ConcurrentHashMap[EndPoint,Acceptor] = new ConcurrentHashMap[EndPoint,Acceptor]() val requestChannel = new RequestChannel(numProcessorThreads, maxQueuedRequests) /* a meter to track the average free capacity of the network processors */ @@ -72,7 +75,7 @@ class SocketServer(val brokerId: Int, requestChannel, quotas, connectionsMaxIdleMs) - Utils.newThread("kafka-network-thread-%d-%d".format(port, i), processors(i), false).start() + Utils.newThread("kafka-network-thread-%d".format(i), processors(i), false).start() } newGauge("ResponsesBeingSent", new Gauge[Int] { @@ -83,10 +86,17 @@ class SocketServer(val brokerId: Int, requestChannel.addResponseListener((id:Int) => processors(id).wakeup()) // start accepting connections - this.acceptor = new Acceptor(host, port, processors, sendBufferSize, recvBufferSize, quotas) - Utils.newThread("kafka-socket-acceptor", acceptor, false).start() - acceptor.awaitStartup - info("Started") + // right now we will use the same processors for all ports, since we didn't implement different protocols + // in the future, we may implement different processors for SSL and Kerberos + + endpoints.foreach(endpoint => { + val acceptor = new Acceptor(endpoint.host, endpoint.port, processors, sendBufferSize, recvBufferSize, quotas) + acceptors.put(endpoint,acceptor) + Utils.newThread("kafka-socket-acceptor-%s-%d".format(endpoint.protocolType.toString, endpoint.port), acceptor, false).start() + acceptor.awaitStartup + }) + + info("Started " + acceptors.size() + " acceptor threads") } /** @@ -94,8 +104,8 @@ class SocketServer(val brokerId: Int, */ def shutdown() = { info("Shutting down") - if(acceptor != null) - acceptor.shutdown() + if(acceptors != null) + acceptors.values().foreach(_.shutdown()) for(processor <- processors) processor.shutdown() info("Shutdown completed") diff --git a/core/src/main/scala/kafka/producer/ProducerConfig.scala b/core/src/main/scala/kafka/producer/ProducerConfig.scala index 3cdf23d..bbbf651 100644 --- a/core/src/main/scala/kafka/producer/ProducerConfig.scala +++ b/core/src/main/scala/kafka/producer/ProducerConfig.scala @@ -19,6 +19,7 @@ package kafka.producer import async.AsyncProducerConfig import java.util.Properties +import kafka.cluster.ProtocolType import kafka.utils.{Utils, VerifiableProperties} import kafka.message.{CompressionCodec, NoCompressionCodec} import kafka.common.{InvalidConfigException, Config} @@ -113,5 +114,8 @@ class ProducerConfig private (val props: 
VerifiableProperties)
   */
   val topicMetadataRefreshIntervalMs = props.getInt("topic.metadata.refresh.interval.ms", 600000)
 
+  /* security protocol the producer uses to connect to brokers: PLAINTEXT or SSL */
+  val securityProtocol = ProtocolType.withName(props.getString("security.protocol", "PLAINTEXT"))
+
   validate(this)
 }
diff --git a/core/src/main/scala/kafka/producer/ProducerPool.scala b/core/src/main/scala/kafka/producer/ProducerPool.scala
index 43df70b..72686fc 100644
--- a/core/src/main/scala/kafka/producer/ProducerPool.scala
+++ b/core/src/main/scala/kafka/producer/ProducerPool.scala
@@ -17,7 +17,7 @@
 
 package kafka.producer
 
-import kafka.cluster.Broker
+import kafka.cluster.{BrokerEndpoint, Broker}
 import java.util.Properties
 import collection.mutable.HashMap
 import java.lang.Object
@@ -30,7 +30,7 @@ object ProducerPool {
   /**
    * Used in ProducerPool to initiate a SyncProducer connection with a broker.
    */
-  def createSyncProducer(config: ProducerConfig, broker: Broker): SyncProducer = {
+  def createSyncProducer(config: ProducerConfig, broker: BrokerEndpoint): SyncProducer = {
     val props = new Properties()
     props.put("host", broker.host)
     props.put("port", broker.port.toString)
@@ -44,11 +44,13 @@ class ProducerPool(val config: ProducerConfig) extends Logging {
   private val lock = new Object()
 
   def updateProducer(topicMetadata: Seq[TopicMetadata]) {
-    val newBrokers = new collection.mutable.HashSet[Broker]
+    val newBrokers = new collection.mutable.HashSet[BrokerEndpoint]
     topicMetadata.foreach(tmd => {
       tmd.partitionsMetadata.foreach(pmd => {
-        if(pmd.leader.isDefined)
-          newBrokers+=(pmd.leader.get)
+        if(pmd.leader.isDefined) {
+          val endpoint = pmd.leader.get.endPoints.head
+          newBrokers += BrokerEndpoint(pmd.leader.get.id, endpoint.host, endpoint.port, config.securityProtocol)
+        }
       })
     })
     lock synchronized {
diff --git a/core/src/main/scala/kafka/server/AbstractFetcherManager.scala b/core/src/main/scala/kafka/server/AbstractFetcherManager.scala
index 20c00cb..36f054e 100644
--- a/core/src/main/scala/kafka/server/AbstractFetcherManager.scala
+++ b/core/src/main/scala/kafka/server/AbstractFetcherManager.scala
@@ -21,7 +21,7 @@ import scala.collection.mutable
 import scala.collection.Set
 import scala.collection.Map
 import kafka.utils.{Utils, Logging}
-import kafka.cluster.Broker
+import kafka.cluster.{BrokerEndpoint, Broker}
 import kafka.metrics.KafkaMetricsGroup
 import kafka.common.TopicAndPartition
 import com.yammer.metrics.core.Gauge
diff --git a/core/src/main/scala/kafka/server/AbstractFetcherThread.scala b/core/src/main/scala/kafka/server/AbstractFetcherThread.scala
index 8c281d4..d770768 100644
--- a/core/src/main/scala/kafka/server/AbstractFetcherThread.scala
+++ b/core/src/main/scala/kafka/server/AbstractFetcherThread.scala
@@ -17,7 +17,7 @@
 
 package kafka.server
 
-import kafka.cluster.Broker
+import kafka.cluster.{BrokerEndpoint, Broker}
 import kafka.utils.{Pool, ShutdownableThread}
 import kafka.consumer.{PartitionTopicInfo, SimpleConsumer}
 import kafka.api.{FetchRequest, FetchResponse, FetchResponsePartitionData, FetchRequestBuilder}
@@ -36,7 +36,7 @@ import com.yammer.metrics.core.Gauge
 
 /**
  * Abstract class for fetching data from multiple partitions from the same broker.
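 *
 * As a quick orientation (a sketch only; the broker value below is hypothetical, and the
 * constructors are the ones introduced in this patch):
 * {{{
 * val broker = new Broker(1, List(EndPoint("broker1", 9092, ProtocolType.PLAINTEXT)))
 * // subclasses now receive the single host/port pair for the configured protocol,
 * // not the whole multi-endpoint Broker:
 * val source: BrokerEndpoint = broker.getBrokerEndPoint(ProtocolType.PLAINTEXT)
 * }}}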
*/ -abstract class AbstractFetcherThread(name: String, clientId: String, sourceBroker: Broker, socketTimeout: Int, socketBufferSize: Int, +abstract class AbstractFetcherThread(name: String, clientId: String, sourceBroker: BrokerEndpoint, socketTimeout: Int, socketBufferSize: Int, fetchSize: Int, fetcherBrokerId: Int = -1, maxWait: Int = 0, minBytes: Int = 1, isInterruptible: Boolean = true) extends ShutdownableThread(name, isInterruptible) { diff --git a/core/src/main/scala/kafka/server/KafkaApis.scala b/core/src/main/scala/kafka/server/KafkaApis.scala index 2a1c032..9b3697c 100644 --- a/core/src/main/scala/kafka/server/KafkaApis.scala +++ b/core/src/main/scala/kafka/server/KafkaApis.scala @@ -382,7 +382,7 @@ class KafkaApis(val requestChannel: RequestChannel, val topicMetadata = getTopicMetadata(metadataRequest.topics.toSet) val brokers = metadataCache.getAliveBrokers trace("Sending topic metadata %s and brokers %s for correlation id %d to client %s".format(topicMetadata.mkString(","), brokers.mkString(","), metadataRequest.correlationId, metadataRequest.clientId)) - val response = new TopicMetadataResponse(brokers, topicMetadata, metadataRequest.correlationId) + val response = new TopicMetadataResponse(brokers.map(_.getBrokerEndPoint(metadataRequest.securityProtocol)), topicMetadata, metadataRequest.correlationId) requestChannel.sendResponse(new RequestChannel.Response(request, new BoundedByteBufferSend(response))) } @@ -417,12 +417,14 @@ class KafkaApis(val requestChannel: RequestChannel, // get metadata (and create the topic if necessary) val offsetsTopicMetadata = getTopicMetadata(Set(OffsetManager.OffsetsTopicName)).head + val securityProtocol = consumerMetadataRequest.securityProtocol + val errorResponse = ConsumerMetadataResponse(None, ErrorMapping.ConsumerCoordinatorNotAvailableCode, consumerMetadataRequest.correlationId) val response = offsetsTopicMetadata.partitionsMetadata.find(_.partitionId == partition).map { partitionMetadata => partitionMetadata.leader.map { leader => - ConsumerMetadataResponse(Some(leader), ErrorMapping.NoError, consumerMetadataRequest.correlationId) + ConsumerMetadataResponse(Some(leader.getBrokerEndPoint(securityProtocol)), ErrorMapping.NoError, consumerMetadataRequest.correlationId) }.getOrElse(errorResponse) }.getOrElse(errorResponse) diff --git a/core/src/main/scala/kafka/server/KafkaConfig.scala b/core/src/main/scala/kafka/server/KafkaConfig.scala index 6e26c54..5a9bac7 100644 --- a/core/src/main/scala/kafka/server/KafkaConfig.scala +++ b/core/src/main/scala/kafka/server/KafkaConfig.scala @@ -18,6 +18,7 @@ package kafka.server import java.util.Properties +import kafka.cluster.ProtocolType import kafka.message.{MessageSet, Message} import kafka.consumer.ConsumerConfig import kafka.utils.{VerifiableProperties, ZKConfig, Utils} @@ -91,23 +92,16 @@ class KafkaConfig private (val props: VerifiableProperties) extends ZKConfig(pro /*********** Socket Server Configuration ***********/ - /* the port to listen and accept connections on */ - val port: Int = props.getInt("port", 6667) + /* Listener List - Comma-separated list of URIs we will listen on and their protocols. + * Specify hostname as 0.0.0.0 to bind to all interfaces + * Leave hostname empty to bind to default interface */ + val listeners: String = props.getString("listeners", "PLAINTEXT://0.0.0.0:6667") - /* hostname of broker. If this is set, it will only bind to this address. 
If this is not set, - * it will bind to all interfaces */ - val hostName: String = props.getString("host.name", null) + /* Listeners to publish to ZooKeeper for clients to use, if different than the listeners above. + * In IaaS environments, this may need to be different from the interface to which the broker binds. + * If this is not set, it will use the value for "listeners" */ + val advertisedListeners: String = props.getString("advertised.listeners", listeners) - /* hostname to publish to ZooKeeper for clients to use. In IaaS environments, this may - * need to be different from the interface to which the broker binds. If this is not set, - * it will use the value for "host.name" if configured. Otherwise - * it will use the value returned from java.net.InetAddress.getCanonicalHostName(). */ - val advertisedHostName: String = props.getString("advertised.host.name", hostName) - - /* the port to publish to ZooKeeper for clients to use. In IaaS environments, this may - * need to be different from the port to which the broker binds. If this is not set, - * it will publish the same port that the broker binds to. */ - val advertisedPort: Int = props.getInt("advertised.port", port) /* the SO_SNDBUFF buffer of the socket sever sockets */ val socketSendBufferBytes: Int = props.getInt("socket.send.buffer.bytes", 100*1024) @@ -127,6 +121,9 @@ class KafkaConfig private (val props: VerifiableProperties) extends ZKConfig(pro /* idle connections timeout: the server socket processor threads close the connections that idle more than this */ val connectionsMaxIdleMs = props.getLong("connections.max.idle.ms", 10*60*1000L) + /* security protocol used to communicate between brokers */ + val securityProtocol = ProtocolType.withName(props.getString("security.protocol","PLAINTEXT")) + /*********** Log Configuration ***********/ /* the default number of log partitions per topic */ @@ -339,4 +336,5 @@ class KafkaConfig private (val props: VerifiableProperties) extends ZKConfig(pro /* Enables delete topic. Delete topic through the admin tool will have no effect if this config is turned off */ val deleteTopicEnable = props.getBoolean("delete.topic.enable", false) + } diff --git a/core/src/main/scala/kafka/server/KafkaHealthcheck.scala b/core/src/main/scala/kafka/server/KafkaHealthcheck.scala index 4acdd70..3ee1256 100644 --- a/core/src/main/scala/kafka/server/KafkaHealthcheck.scala +++ b/core/src/main/scala/kafka/server/KafkaHealthcheck.scala @@ -17,6 +17,7 @@ package kafka.server +import kafka.cluster.EndPoint import kafka.utils._ import org.apache.zookeeper.Watcher.Event.KeeperState import org.I0Itec.zkclient.{IZkStateListener, ZkClient} @@ -31,9 +32,8 @@ import java.net.InetAddress * Right now our definition of health is fairly naive. If we register in zk we are healthy, otherwise * we are dead. 
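 *
 * For illustration, a successful registration publishes broker JSON of roughly this shape
 * (hypothetical values; the exact fields follow the Json.encode call in
 * ZkUtils.registerBrokerInZk below, with host and port taken from the PLAINTEXT endpoint):
 * {{{
 * {"version":1,"host":"broker1","port":6667,
 *  "endpoints":"PLAINTEXT://broker1:6667,SSL://broker1:6668",
 *  "jmx_port":9999,"timestamp":"1416974968782"}
 * }}}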
*/ -class KafkaHealthcheck(private val brokerId: Int, - private val advertisedHost: String, - private val advertisedPort: Int, +class KafkaHealthcheck(private val brokerId: Int, + private val advertisedEndpoints: Seq[EndPoint], private val zkSessionTimeoutMs: Int, private val zkClient: ZkClient) extends Logging { @@ -54,13 +54,8 @@ class KafkaHealthcheck(private val brokerId: Int, * Register this broker as "alive" in zookeeper */ def register() { - val advertisedHostName = - if(advertisedHost == null || advertisedHost.trim.isEmpty) - InetAddress.getLocalHost.getCanonicalHostName - else - advertisedHost val jmxPort = System.getProperty("com.sun.management.jmxremote.port", "-1").toInt - ZkUtils.registerBrokerInZk(zkClient, brokerId, advertisedHostName, advertisedPort, zkSessionTimeoutMs, jmxPort) + ZkUtils.registerBrokerInZk(zkClient, brokerId, advertisedEndpoints, zkSessionTimeoutMs, jmxPort) } /** diff --git a/core/src/main/scala/kafka/server/KafkaServer.scala b/core/src/main/scala/kafka/server/KafkaServer.scala index 1bf7d10..ccb74cb 100644 --- a/core/src/main/scala/kafka/server/KafkaServer.scala +++ b/core/src/main/scala/kafka/server/KafkaServer.scala @@ -86,8 +86,7 @@ class KafkaServer(val config: KafkaConfig, time: Time = SystemTime) extends Logg logManager.startup() socketServer = new SocketServer(config.brokerId, - config.hostName, - config.port, + Utils.listenerListToEndPoints(config.listeners), config.numNetworkThreads, config.queuedMaxRequests, config.socketSendBufferBytes, @@ -117,9 +116,10 @@ class KafkaServer(val config: KafkaConfig, time: Time = SystemTime) extends Logg topicConfigManager = new TopicConfigManager(zkClient, logManager) topicConfigManager.startup() - + /* tell everyone we are alive */ - kafkaHealthcheck = new KafkaHealthcheck(config.brokerId, config.advertisedHostName, config.advertisedPort, config.zkSessionTimeoutMs, zkClient) + val endpoints = Utils.listenerListToEndPoints(config.advertisedListeners) + kafkaHealthcheck = new KafkaHealthcheck(config.brokerId,endpoints,config.zkSessionTimeoutMs, zkClient) kafkaHealthcheck.startup() @@ -199,7 +199,8 @@ class KafkaServer(val config: KafkaConfig, time: Time = SystemTime) extends Logg if (channel != null) { channel.disconnect() } - channel = new BlockingChannel(broker.host, broker.port, + channel = new BlockingChannel(broker.getBrokerEndPoint(config.securityProtocol).host, + broker.getBrokerEndPoint(config.securityProtocol).port, BlockingChannel.UseDefaultBufferSize, BlockingChannel.UseDefaultBufferSize, config.controllerSocketTimeoutMs) diff --git a/core/src/main/scala/kafka/server/ReplicaFetcherThread.scala b/core/src/main/scala/kafka/server/ReplicaFetcherThread.scala index 6879e73..5cf4cfd 100644 --- a/core/src/main/scala/kafka/server/ReplicaFetcherThread.scala +++ b/core/src/main/scala/kafka/server/ReplicaFetcherThread.scala @@ -30,7 +30,7 @@ class ReplicaFetcherThread(name:String, replicaMgr: ReplicaManager) extends AbstractFetcherThread(name = name, clientId = name, - sourceBroker = sourceBroker, + sourceBroker = sourceBroker.getBrokerEndPoint(brokerConfig.securityProtocol), socketTimeout = brokerConfig.replicaSocketTimeoutMs, socketBufferSize = brokerConfig.replicaSocketReceiveBufferBytes, fetchSize = brokerConfig.replicaFetchMaxBytes, diff --git a/core/src/main/scala/kafka/tools/ConsumerOffsetChecker.scala b/core/src/main/scala/kafka/tools/ConsumerOffsetChecker.scala index d1e7c43..7198322 100644 --- a/core/src/main/scala/kafka/tools/ConsumerOffsetChecker.scala +++ 
b/core/src/main/scala/kafka/tools/ConsumerOffsetChecker.scala @@ -19,6 +19,7 @@ package kafka.tools import joptsimple._ +import kafka.cluster.ProtocolType import org.I0Itec.zkclient.ZkClient import kafka.utils._ import kafka.consumer.SimpleConsumer @@ -158,7 +159,7 @@ object ConsumerOffsetChecker extends Logging { topicPidMap = immutable.Map(ZkUtils.getPartitionsForTopics(zkClient, topicList).toSeq:_*) val topicPartitions = topicPidMap.flatMap { case(topic, partitionSeq) => partitionSeq.map(TopicAndPartition(topic, _)) }.toSeq - val channel = ClientUtils.channelToOffsetManager(group, zkClient, channelSocketTimeoutMs, channelRetryBackoffMs) + val channel = ClientUtils.channelToOffsetManager(group, zkClient, channelSocketTimeoutMs, channelRetryBackoffMs, ProtocolType.PLAINTEXT) debug("Sending offset fetch request to coordinator %s:%d.".format(channel.host, channel.port)) channel.send(OffsetFetchRequest(group, topicPartitions)) diff --git a/core/src/main/scala/kafka/tools/GetOffsetShell.scala b/core/src/main/scala/kafka/tools/GetOffsetShell.scala index 3d9293e..46078b7 100644 --- a/core/src/main/scala/kafka/tools/GetOffsetShell.scala +++ b/core/src/main/scala/kafka/tools/GetOffsetShell.scala @@ -24,6 +24,7 @@ import kafka.api.{PartitionOffsetRequestInfo, OffsetRequest} import kafka.common.TopicAndPartition import kafka.client.ClientUtils import kafka.utils.{ToolsUtils, CommandLineUtils} +import kafka.cluster.ProtocolType object GetOffsetShell { @@ -93,7 +94,8 @@ object GetOffsetShell { case Some(metadata) => metadata.leader match { case Some(leader) => - val consumer = new SimpleConsumer(leader.host, leader.port, 10000, 100000, clientId) + val consumer = new SimpleConsumer(leader.getBrokerEndPoint(ProtocolType.PLAINTEXT).host, + leader.getBrokerEndPoint(ProtocolType.PLAINTEXT).port, 10000, 100000, clientId) val topicAndPartition = TopicAndPartition(topic, partitionId) val request = OffsetRequest(Map(topicAndPartition -> PartitionOffsetRequestInfo(time, nOffsets))) val offsets = consumer.getOffsetsBefore(request).partitionErrorAndOffsets(topicAndPartition).offsets diff --git a/core/src/main/scala/kafka/tools/ReplicaVerificationTool.scala b/core/src/main/scala/kafka/tools/ReplicaVerificationTool.scala index ba6ddd7..ca06c1e 100644 --- a/core/src/main/scala/kafka/tools/ReplicaVerificationTool.scala +++ b/core/src/main/scala/kafka/tools/ReplicaVerificationTool.scala @@ -18,7 +18,7 @@ package kafka.tools import joptsimple.OptionParser -import kafka.cluster.Broker +import kafka.cluster.{BrokerEndpoint, Broker} import kafka.message.{MessageSet, MessageAndOffset, ByteBufferMessageSet} import java.util.concurrent.CountDownLatch import java.util.concurrent.atomic.AtomicReference @@ -197,7 +197,7 @@ private case class MessageInfo(replicaId: Int, offset: Long, nextOffset: Long, c private class ReplicaBuffer(expectedReplicasPerTopicAndPartition: Map[TopicAndPartition, Int], leadersPerBroker: Map[Int, Seq[TopicAndPartition]], expectedNumFetchers: Int, - brokerMap: Map[Int, Broker], + brokerMap: Map[Int, BrokerEndpoint], initialOffsetTime: Long, reportInterval: Long) extends Logging { private val fetchOffsetMap = new Pool[TopicAndPartition, Long] @@ -335,7 +335,7 @@ private class ReplicaBuffer(expectedReplicasPerTopicAndPartition: Map[TopicAndPa } } -private class ReplicaFetcher(name: String, sourceBroker: Broker, topicAndPartitions: Iterable[TopicAndPartition], +private class ReplicaFetcher(name: String, sourceBroker: BrokerEndpoint, topicAndPartitions: Iterable[TopicAndPartition], replicaBuffer: 
ReplicaBuffer, socketTimeout: Int, socketBufferSize: Int, fetchSize: Int, maxWait: Int, minBytes: Int, doVerification: Boolean) extends ShutdownableThread(name) { diff --git a/core/src/main/scala/kafka/tools/SimpleConsumerShell.scala b/core/src/main/scala/kafka/tools/SimpleConsumerShell.scala index b4f903b..77c4900 100644 --- a/core/src/main/scala/kafka/tools/SimpleConsumerShell.scala +++ b/core/src/main/scala/kafka/tools/SimpleConsumerShell.scala @@ -22,7 +22,7 @@ import kafka.utils._ import kafka.consumer._ import kafka.client.ClientUtils import kafka.api.{OffsetRequest, FetchRequestBuilder, Request} -import kafka.cluster.Broker +import kafka.cluster.{ProtocolType, Broker} import scala.collection.JavaConversions._ import kafka.common.TopicAndPartition @@ -167,7 +167,9 @@ object SimpleConsumerShell extends Logging { System.exit(1) } if (startingOffset < 0) { - val simpleConsumer = new SimpleConsumer(fetchTargetBroker.host, fetchTargetBroker.port, ConsumerConfig.SocketTimeout, + val simpleConsumer = new SimpleConsumer(fetchTargetBroker.getBrokerEndPoint(ProtocolType.PLAINTEXT).host, + fetchTargetBroker.getBrokerEndPoint(ProtocolType.PLAINTEXT).port, + ConsumerConfig.SocketTimeout, ConsumerConfig.SocketBufferSize, clientId) try { startingOffset = simpleConsumer.earliestOrLatestOffset(TopicAndPartition(topic, partitionId), startingOffset, @@ -188,8 +190,12 @@ object SimpleConsumerShell extends Logging { val replicaString = if(replicaId > 0) "leader" else "replica" info("Starting simple consumer shell to partition [%s, %d], %s [%d], host and port: [%s, %d], from offset [%d]" - .format(topic, partitionId, replicaString, replicaId, fetchTargetBroker.host, fetchTargetBroker.port, startingOffset)) - val simpleConsumer = new SimpleConsumer(fetchTargetBroker.host, fetchTargetBroker.port, 10000, 64*1024, clientId) + .format(topic, partitionId, replicaString, replicaId, + fetchTargetBroker.getBrokerEndPoint(ProtocolType.PLAINTEXT).host, + fetchTargetBroker.getBrokerEndPoint(ProtocolType.PLAINTEXT).port, startingOffset)) + val simpleConsumer = new SimpleConsumer(fetchTargetBroker.getBrokerEndPoint(ProtocolType.PLAINTEXT).host, + fetchTargetBroker.getBrokerEndPoint(ProtocolType.PLAINTEXT).port, + 10000, 64*1024, clientId) val thread = Utils.newThread("kafka-simpleconsumer-shell", new Runnable() { def run() { var offset = startingOffset diff --git a/core/src/main/scala/kafka/tools/UpdateOffsetsInZK.scala b/core/src/main/scala/kafka/tools/UpdateOffsetsInZK.scala index 111c9a8..38d3b48 100644 --- a/core/src/main/scala/kafka/tools/UpdateOffsetsInZK.scala +++ b/core/src/main/scala/kafka/tools/UpdateOffsetsInZK.scala @@ -17,6 +17,7 @@ package kafka.tools +import kafka.cluster.ProtocolType import org.I0Itec.zkclient.ZkClient import kafka.consumer.{SimpleConsumer, ConsumerConfig} import kafka.api.{PartitionOffsetRequestInfo, OffsetRequest} @@ -65,7 +66,9 @@ object UpdateOffsetsInZK { ZkUtils.getBrokerInfo(zkClient, broker) match { case Some(brokerInfo) => - val consumer = new SimpleConsumer(brokerInfo.host, brokerInfo.port, 10000, 100 * 1024, "UpdateOffsetsInZk") + val consumer = new SimpleConsumer(brokerInfo.getBrokerEndPoint(ProtocolType.PLAINTEXT).host, + brokerInfo.getBrokerEndPoint(ProtocolType.PLAINTEXT).port, + 10000, 100 * 1024, "UpdateOffsetsInZk") val topicAndPartition = TopicAndPartition(topic, partition) val request = OffsetRequest(Map(topicAndPartition -> PartitionOffsetRequestInfo(offsetOption, 1))) val offset = 
consumer.getOffsetsBefore(request).partitionErrorAndOffsets(topicAndPartition).offsets.head diff --git a/core/src/main/scala/kafka/utils/Utils.scala b/core/src/main/scala/kafka/utils/Utils.scala index 738c1af..b823c1e 100644 --- a/core/src/main/scala/kafka/utils/Utils.scala +++ b/core/src/main/scala/kafka/utils/Utils.scala @@ -24,11 +24,14 @@ import java.nio.channels._ import java.util.concurrent.locks.{ReadWriteLock, Lock} import java.lang.management._ import javax.management._ + import scala.collection._ import scala.collection.mutable import java.util.Properties import kafka.common.KafkaException import kafka.common.KafkaStorageException +import kafka.cluster.EndPoint +import kafka.cluster.ProtocolType /** @@ -607,4 +610,9 @@ object Utils extends Logging { .filter{ case (k,l) => (l > 1) } .keys } + + def listenerListToEndPoints(listeners: String): Seq[EndPoint] = { + val listenerList = parseCsvList(listeners) + listenerList.map(listener => EndPoint.createEndPoint(listener)) + } } diff --git a/core/src/main/scala/kafka/utils/ZkUtils.scala b/core/src/main/scala/kafka/utils/ZkUtils.scala index 56e3e88..aed294f 100644 --- a/core/src/main/scala/kafka/utils/ZkUtils.scala +++ b/core/src/main/scala/kafka/utils/ZkUtils.scala @@ -17,7 +17,8 @@ package kafka.utils -import kafka.cluster.{Broker, Cluster} +import kafka.cluster.ProtocolType.ProtocolType +import kafka.cluster._ import kafka.consumer.{ConsumerThreadId, TopicCount} import org.I0Itec.zkclient.ZkClient import org.I0Itec.zkclient.exception.{ZkNodeExistsException, ZkNoNodeException, @@ -82,6 +83,10 @@ object ZkUtils extends Logging { brokerIds.map(_.toInt).map(getBrokerInfo(zkClient, _)).filter(_.isDefined).map(_.get) } + def getAllBrokerEndPointsForChannel(zkClient: ZkClient, protocolType: ProtocolType): Seq[BrokerEndpoint] = { + getAllBrokersInCluster(zkClient).map(_.getBrokerEndPoint(protocolType)) + } + def getLeaderAndIsrForPartition(zkClient: ZkClient, topic: String, partition: Int):Option[LeaderAndIsr] = { ReplicationUtils.getLeaderIsrAndEpochForPartition(zkClient, topic, partition).map(_.leaderAndIsr) } @@ -158,11 +163,15 @@ object ZkUtils extends Logging { } } - def registerBrokerInZk(zkClient: ZkClient, id: Int, host: String, port: Int, timeout: Int, jmxPort: Int) { + def registerBrokerInZk(zkClient: ZkClient, id: Int, advertisedEndpoints: Seq[EndPoint], timeout: Int, jmxPort: Int) { val brokerIdPath = ZkUtils.BrokerIdsPath + "/" + id val timestamp = SystemTime.milliseconds.toString - val brokerInfo = Json.encode(Map("version" -> 1, "host" -> host, "port" -> port, "jmx_port" -> jmxPort, "timestamp" -> timestamp)) - val expectedBroker = new Broker(id, host, port) + val defaultEndPoint = advertisedEndpoints.find(_.protocolType == ProtocolType.PLAINTEXT).get + val host = defaultEndPoint.host + val port = defaultEndPoint.port + + val brokerInfo = Json.encode(Map("version" -> 1, "host" -> host, "port" -> port, "endpoints"->advertisedEndpoints.mkString(","), "jmx_port" -> jmxPort, "timestamp" -> timestamp)) + val expectedBroker = new Broker(id, advertisedEndpoints) try { createEphemeralPathExpectConflictHandleZKBug(zkClient, brokerIdPath, brokerInfo, expectedBroker, diff --git a/core/src/test/scala/integration/kafka/api/ProducerFailureHandlingTest.scala b/core/src/test/scala/integration/kafka/api/ProducerFailureHandlingTest.scala index 5ec613c..66c6c4e 100644 --- a/core/src/test/scala/integration/kafka/api/ProducerFailureHandlingTest.scala +++ b/core/src/test/scala/integration/kafka/api/ProducerFailureHandlingTest.scala @@ -19,7 +19,6 
@@ package kafka.api.test
 
 import org.junit.Test
 import org.junit.Assert._
-
 import java.lang.Integer
 import java.util.{Properties, Random}
 import java.util.concurrent.{TimeoutException, TimeUnit, ExecutionException}
@@ -29,7 +28,7 @@ import kafka.common.Topic
 import kafka.consumer.SimpleConsumer
 import kafka.server.KafkaConfig
 import kafka.integration.KafkaServerTestHarness
-import kafka.utils.{TestZKUtils, ShutdownableThread, TestUtils}
+import kafka.utils.{TestZKUtils, ShutdownableThread, TestUtils, Utils}
 import org.apache.kafka.common.KafkaException
 import org.apache.kafka.common.errors.{InvalidTopicException, NotEnoughReplicasException}
@@ -64,8 +63,11 @@ class ProducerFailureHandlingTest extends KafkaServerTestHarness {
     super.setUp()
 
     // TODO: we need to migrate to new consumers when 0.9 is final
-    consumer1 = new SimpleConsumer("localhost", configs(0).port, 100, 1024*1024, "")
-    consumer2 = new SimpleConsumer("localhost", configs(1).port, 100, 1024*1024, "")
+
+    val endpoint1 = Utils.listenerListToEndPoints(configs(0).listeners).head
+    val endpoint2 = Utils.listenerListToEndPoints(configs(1).listeners).head
+    consumer1 = new SimpleConsumer("localhost", endpoint1.port, 100, 1024*1024, "")
+    consumer2 = new SimpleConsumer("localhost", endpoint2.port, 100, 1024*1024, "")
 
     producer1 = TestUtils.createNewProducer(brokerList, acks = 0, blockOnBufferFull = false, bufferSize = producerBufferSize)
     producer2 = TestUtils.createNewProducer(brokerList, acks = 1, blockOnBufferFull = false, bufferSize = producerBufferSize)
diff --git a/core/src/test/scala/integration/kafka/api/ProducerSendTest.scala b/core/src/test/scala/integration/kafka/api/ProducerSendTest.scala
index 6196060..80acac2 100644
--- a/core/src/test/scala/integration/kafka/api/ProducerSendTest.scala
+++ b/core/src/test/scala/integration/kafka/api/ProducerSendTest.scala
@@ -25,7 +25,7 @@ import org.junit.Test
 import org.junit.Assert._
 
 import kafka.server.KafkaConfig
-import kafka.utils.{TestZKUtils, TestUtils}
+import kafka.utils.{Utils, TestZKUtils, TestUtils}
 import kafka.consumer.SimpleConsumer
 import kafka.api.FetchRequestBuilder
 import kafka.message.Message
@@ -51,8 +51,10 @@ class ProducerSendTest extends JUnit3Suite with KafkaServerTestHarness {
     super.setUp()
 
     // TODO: we need to migrate to new consumers when 0.9 is final
-    consumer1 = new SimpleConsumer("localhost", configs(0).port, 100, 1024*1024, "")
-    consumer2 = new SimpleConsumer("localhost", configs(1).port, 100, 1024*1024, "")
+    val endpoint1 = Utils.listenerListToEndPoints(configs(0).listeners).head
+    val endpoint2 = Utils.listenerListToEndPoints(configs(1).listeners).head
+    consumer1 = new SimpleConsumer("localhost", endpoint1.port, 100, 1024*1024, "")
+    consumer2 = new SimpleConsumer("localhost", endpoint2.port, 100, 1024*1024, "")
   }
 
   override def tearDown() {
diff --git a/core/src/test/scala/other/kafka/TestOffsetManager.scala b/core/src/test/scala/other/kafka/TestOffsetManager.scala
index 41f334d..67301ed 100644
--- a/core/src/test/scala/other/kafka/TestOffsetManager.scala
+++ b/core/src/test/scala/other/kafka/TestOffsetManager.scala
@@ -1,5 +1,6 @@
 package other.kafka
 
+import kafka.cluster.ProtocolType
 import org.I0Itec.zkclient.ZkClient
 import kafka.api._
 import kafka.utils.{ShutdownableThread, ZKStringSerializer}
@@ -110,7 +111,7 @@
     private val fetchTimer = new KafkaTimer(timer)
 
     private val channels = mutable.Map[Int, BlockingChannel]()
-    private var metadataChannel =
ClientUtils.channelToAnyBroker(zkClient, SocketTimeoutMs) + private var metadataChannel = ClientUtils.channelToAnyBroker(zkClient, ProtocolType.PLAINTEXT, SocketTimeoutMs) private val numErrors = new AtomicInteger(0) @@ -156,7 +157,7 @@ object TestOffsetManager { println("Error while querying %s:%d - shutting down query channel.".format(metadataChannel.host, metadataChannel.port)) metadataChannel.disconnect() println("Creating new query channel.") - metadataChannel = ClientUtils.channelToAnyBroker(zkClient, SocketTimeoutMs) + metadataChannel = ClientUtils.channelToAnyBroker(zkClient, ProtocolType.PLAINTEXT, SocketTimeoutMs) } finally { Thread.sleep(fetchIntervalMs) diff --git a/core/src/test/scala/unit/kafka/admin/AddPartitionsTest.scala b/core/src/test/scala/unit/kafka/admin/AddPartitionsTest.scala index 1bf2667..2f338b7 100644 --- a/core/src/test/scala/unit/kafka/admin/AddPartitionsTest.scala +++ b/core/src/test/scala/unit/kafka/admin/AddPartitionsTest.scala @@ -22,7 +22,7 @@ import kafka.zk.ZooKeeperTestHarness import kafka.utils.TestUtils._ import junit.framework.Assert._ import kafka.utils.{ZkUtils, Utils, TestUtils} -import kafka.cluster.Broker +import kafka.cluster.{ProtocolType, EndPoint, Broker} import kafka.client.ClientUtils import kafka.server.{KafkaConfig, KafkaServer} @@ -61,7 +61,7 @@ class AddPartitionsTest extends JUnit3Suite with ZooKeeperTestHarness { val server4 = TestUtils.createServer(new KafkaConfig(configProps4)) servers ++= List(server1, server2, server3, server4) - brokers = servers.map(s => new Broker(s.config.brokerId, s.config.hostName, s.config.port)) + brokers = servers.map(s => new Broker(s.config.brokerId, Utils.listenerListToEndPoints(s.config.listeners))) // create topics first createTopic(zkClient, topic1, partitionReplicaAssignment = Map(0->Seq(0,1)), servers = servers) @@ -109,7 +109,7 @@ class AddPartitionsTest extends JUnit3Suite with ZooKeeperTestHarness { // read metadata from a broker and verify the new topic partitions exist TestUtils.waitUntilMetadataIsPropagated(servers, topic1, 1) TestUtils.waitUntilMetadataIsPropagated(servers, topic1, 2) - val metadata = ClientUtils.fetchTopicMetadata(Set(topic1), brokers, "AddPartitionsTest-testIncrementPartitions", + val metadata = ClientUtils.fetchTopicMetadata(Set(topic1), brokers.map(_.getBrokerEndPoint(ProtocolType.PLAINTEXT)), "AddPartitionsTest-testIncrementPartitions", 2000,0).topicsMetadata val metaDataForTopic1 = metadata.filter(p => p.topic.equals(topic1)) val partitionDataForTopic1 = metaDataForTopic1.head.partitionsMetadata @@ -134,7 +134,7 @@ class AddPartitionsTest extends JUnit3Suite with ZooKeeperTestHarness { // read metadata from a broker and verify the new topic partitions exist TestUtils.waitUntilMetadataIsPropagated(servers, topic2, 1) TestUtils.waitUntilMetadataIsPropagated(servers, topic2, 2) - val metadata = ClientUtils.fetchTopicMetadata(Set(topic2), brokers, "AddPartitionsTest-testManualAssignmentOfReplicas", + val metadata = ClientUtils.fetchTopicMetadata(Set(topic2), brokers.map(_.getBrokerEndPoint(ProtocolType.PLAINTEXT)), "AddPartitionsTest-testManualAssignmentOfReplicas", 2000,0).topicsMetadata val metaDataForTopic2 = metadata.filter(p => p.topic.equals(topic2)) val partitionDataForTopic2 = metaDataForTopic2.head.partitionsMetadata @@ -158,7 +158,7 @@ class AddPartitionsTest extends JUnit3Suite with ZooKeeperTestHarness { TestUtils.waitUntilMetadataIsPropagated(servers, topic3, 5) TestUtils.waitUntilMetadataIsPropagated(servers, topic3, 6) - val metadata = 
ClientUtils.fetchTopicMetadata(Set(topic3), brokers, "AddPartitionsTest-testReplicaPlacement",
+    val metadata = ClientUtils.fetchTopicMetadata(Set(topic3), brokers.map(_.getBrokerEndPoint(ProtocolType.PLAINTEXT)), "AddPartitionsTest-testReplicaPlacement",
       2000,0).topicsMetadata
 
     val metaDataForTopic3 = metadata.filter(p => p.topic.equals(topic3)).head
diff --git a/core/src/test/scala/unit/kafka/api/RequestResponseSerializationTest.scala b/core/src/test/scala/unit/kafka/api/RequestResponseSerializationTest.scala
index cd16ced..d69d02a 100644
--- a/core/src/test/scala/unit/kafka/api/RequestResponseSerializationTest.scala
+++ b/core/src/test/scala/unit/kafka/api/RequestResponseSerializationTest.scala
@@ -22,7 +22,7 @@ import org.scalatest.junit.JUnitSuite
 import junit.framework.Assert._
 import java.nio.ByteBuffer
 import kafka.message.{Message, ByteBufferMessageSet}
-import kafka.cluster.Broker
+import kafka.cluster.{BrokerEndpoint, ProtocolType, EndPoint, Broker}
 import kafka.common.{OffsetAndMetadata, ErrorMapping, OffsetMetadataAndError}
 import kafka.utils.SystemTime
 import org.apache.kafka.common.requests._
@@ -80,7 +80,9 @@ object SerializationTestUtils {
     TopicAndPartition(topic2, 3) -> PartitionFetchInfo(4000, 100)
   )
 
-  private val brokers = List(new Broker(0, "localhost", 1011), new Broker(1, "localhost", 1012), new Broker(2, "localhost", 1013))
+  private val brokers = List(new Broker(0, List(EndPoint("localhost", 1011, ProtocolType.PLAINTEXT))),
+                             new Broker(1, List(EndPoint("localhost", 1012, ProtocolType.PLAINTEXT))),
+                             new Broker(2, List(EndPoint("localhost", 1013, ProtocolType.PLAINTEXT))))
   private val partitionMetaData0 = new PartitionMetadata(0, Some(brokers.head), replicas = brokers, isr = brokers, errorCode = 0)
   private val partitionMetaData1 = new PartitionMetadata(1, Some(brokers.head), replicas = brokers, isr = brokers.tail, errorCode = 1)
   private val partitionMetaData2 = new PartitionMetadata(2, Some(brokers.head), replicas = brokers, isr = brokers, errorCode = 2)
@@ -144,11 +146,11 @@ object SerializationTestUtils {
   }
 
   def createTestTopicMetadataRequest: TopicMetadataRequest = {
-    new TopicMetadataRequest(1, 1, "client 1", Seq(topic1, topic2))
+    new TopicMetadataRequest(1, 1, "client 1", ProtocolType.PLAINTEXT, Seq(topic1, topic2))
   }
 
   def createTestTopicMetadataResponse: TopicMetadataResponse = {
-    new TopicMetadataResponse(brokers, Seq(topicmetaData1, topicmetaData2), 1)
+    new TopicMetadataResponse(List(BrokerEndpoint(1, "localhost", 9092, ProtocolType.PLAINTEXT)), Seq(topicmetaData1, topicmetaData2), 1)
   }
 
   def createTestOffsetCommitRequestV1: OffsetCommitRequest = {
@@ -192,7 +194,7 @@ object SerializationTestUtils {
   }
 
   def createConsumerMetadataResponse: ConsumerMetadataResponse = {
-    ConsumerMetadataResponse(Some(brokers.head), ErrorMapping.NoError)
+    ConsumerMetadataResponse(Some(brokers.head.getBrokerEndPoint(ProtocolType.PLAINTEXT)), ErrorMapping.NoError)
   }
 
   def createHeartbeatRequestAndHeader: HeartbeatRequestAndHeader = {
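The fixtures above lean on the split this patch introduces between the cluster-side Broker (which carries a list of EndPoints) and the wire-level BrokerEndpoint (a single host/port pair). A minimal sketch of that relationship, using hypothetical values and only the constructors introduced in this patch:

    import kafka.cluster.{Broker, BrokerEndpoint, EndPoint, ProtocolType}

    // A broker may advertise one endpoint per protocol.
    val broker = new Broker(0, List(EndPoint("localhost", 1011, ProtocolType.PLAINTEXT)))

    // Requests and responses carry only the endpoint for the protocol the client
    // speaks; getBrokerEndPoint selects it, or throws
    // BrokerEndPointNotAvailableException if no listener for that protocol exists.
    val endpoint: BrokerEndpoint = broker.getBrokerEndPoint(ProtocolType.PLAINTEXT)
    assert(endpoint.host == "localhost" && endpoint.port == 1011)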
diff --git a/core/src/test/scala/unit/kafka/cluster/BrokerTest.scala b/core/src/test/scala/unit/kafka/cluster/BrokerTest.scala
new file mode 100644
index 0000000..289d87b
--- /dev/null
+++ b/core/src/test/scala/unit/kafka/cluster/BrokerTest.scala
@@ -0,0 +1,90 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package kafka.cluster
+
+import java.nio.ByteBuffer
+
+import kafka.utils.Logging
+import org.junit.Test
+import org.scalatest.junit.JUnit3Suite
+
+import scala.collection.mutable
+
+class BrokerTest extends JUnit3Suite with Logging {
+
+  @Test
+  def testSerDe() = {
+
+    val endpoint = new EndPoint("myhost",9092,ProtocolType.PLAINTEXT)
+    val listEndPoints = List(endpoint)
+    val origBroker = new Broker(1,listEndPoints)
+    val brokerBytes = ByteBuffer.allocate(origBroker.sizeInBytes)
+
+    origBroker.writeTo(brokerBytes)
+
+    val newBroker = Broker.readFrom(brokerBytes.flip().asInstanceOf[ByteBuffer])
+    assert(origBroker == newBroker)
+  }
+
+  @Test
+  def testHashAndEquals() = {
+    val endpoint1 = new EndPoint("myhost",9092,ProtocolType.PLAINTEXT)
+    val endpoint2 = new EndPoint("myhost",9092,ProtocolType.PLAINTEXT)
+    val endpoint3 = new EndPoint("myhost",1111,ProtocolType.PLAINTEXT)
+    val endpoint4 = new EndPoint("other",1111,ProtocolType.PLAINTEXT)
+    val broker1 = new Broker(1,List(endpoint1))
+    val broker2 = new Broker(1,List(endpoint2))
+    val broker3 = new Broker(2,List(endpoint3))
+    val broker4 = new Broker(1,List(endpoint4))
+
+    assert(broker1 == broker2)
+    assert(broker1 != broker3)
+    assert(broker1 != broker4)
+    assert(broker1.hashCode() == broker2.hashCode())
+    assert(broker1.hashCode() != broker3.hashCode())
+    assert(broker1.hashCode() != broker4.hashCode())
+
+    val hashmap = new mutable.HashMap[Broker,Int]()
+    hashmap.put(broker1,1)
+    assert(hashmap.getOrElse(broker1,-1) == 1)
+  }
+
+  @Test
+  def testFromJSON() = {
+    val brokerInfoStr = "{\"version\":1," +
+                        "\"host\":\"localhost\"," +
+                        "\"port\":9092," +
+                        "\"jmx_port\":9999," +
+                        "\"timestamp\":\"1416974968782\"," +
+                        "\"endpoints\":\"PLAINTEXT://localhost:9092\"}"
+    val broker = Broker.createBroker(1, brokerInfoStr)
+    assert(broker.id == 1)
+    assert(broker.getBrokerEndPoint(ProtocolType.PLAINTEXT).host == "localhost")
+    assert(broker.getBrokerEndPoint(ProtocolType.PLAINTEXT).port == 9092)
+  }
+
+  @Test
+  def testEndpointFromURI() = {
+    val connectionString = "PLAINTEXT://localhost:9092"
+    val endpoint = BrokerEndpoint.createBrokerEndPoint(1,connectionString)
+    assert(endpoint.host == "localhost")
+    assert(endpoint.port == 9092)
+    assert(endpoint.protocolType == ProtocolType.PLAINTEXT)
+  }
+
+}
diff --git a/core/src/test/scala/unit/kafka/consumer/ConsumerIteratorTest.scala b/core/src/test/scala/unit/kafka/consumer/ConsumerIteratorTest.scala
index c0355cc..ec31b34 100644
--- a/core/src/test/scala/unit/kafka/consumer/ConsumerIteratorTest.scala
+++ b/core/src/test/scala/unit/kafka/consumer/ConsumerIteratorTest.scala
@@ -29,7 +29,7 @@ import kafka.utils.TestUtils._
 import kafka.utils._
 import org.junit.Test
 import kafka.serializer._
-import kafka.cluster.{Broker, Cluster}
+import kafka.cluster.{ProtocolType,
EndPoint, Broker, Cluster} import org.scalatest.junit.JUnit3Suite import kafka.integration.KafkaServerTestHarness @@ -46,7 +46,7 @@ class ConsumerIteratorTest extends JUnit3Suite with KafkaServerTestHarness { val group = "group1" val consumer0 = "consumer0" val consumedOffset = 5 - val cluster = new Cluster(configs.map(c => new Broker(c.brokerId, "localhost", c.port))) + val cluster = new Cluster(configs.map(c => new Broker(c.brokerId,Utils.listenerListToEndPoints(c.listeners)))) val queue = new LinkedBlockingQueue[FetchedDataChunk] val topicInfos = configs.map(c => new PartitionTopicInfo(topic, 0, diff --git a/core/src/test/scala/unit/kafka/integration/FetcherTest.scala b/core/src/test/scala/unit/kafka/integration/FetcherTest.scala index 25845ab..ab139ca 100644 --- a/core/src/test/scala/unit/kafka/integration/FetcherTest.scala +++ b/core/src/test/scala/unit/kafka/integration/FetcherTest.scala @@ -29,7 +29,7 @@ import kafka.consumer._ import kafka.serializer._ import kafka.producer.{KeyedMessage, Producer} import kafka.utils.TestUtils._ -import kafka.utils.TestUtils +import kafka.utils.{Utils, TestUtils} class FetcherTest extends JUnit3Suite with KafkaServerTestHarness { @@ -39,7 +39,7 @@ class FetcherTest extends JUnit3Suite with KafkaServerTestHarness { yield new KafkaConfig(props) val messages = new mutable.HashMap[Int, Seq[Array[Byte]]] val topic = "topic" - val cluster = new Cluster(configs.map(c => new Broker(c.brokerId, "localhost", c.port))) + val cluster = new Cluster(configs.map(c => new Broker(c.brokerId,Utils.listenerListToEndPoints(c.listeners)))) val shutdown = ZookeeperConsumerConnector.shutdownCommand val queue = new LinkedBlockingQueue[FetchedDataChunk] val topicInfos = configs.map(c => new PartitionTopicInfo(topic, diff --git a/core/src/test/scala/unit/kafka/integration/TopicMetadataTest.scala b/core/src/test/scala/unit/kafka/integration/TopicMetadataTest.scala index 35dc071..ceb39fa 100644 --- a/core/src/test/scala/unit/kafka/integration/TopicMetadataTest.scala +++ b/core/src/test/scala/unit/kafka/integration/TopicMetadataTest.scala @@ -22,8 +22,8 @@ import kafka.zk.ZooKeeperTestHarness import kafka.admin.AdminUtils import java.nio.ByteBuffer import junit.framework.Assert._ -import kafka.cluster.Broker -import kafka.utils.TestUtils +import kafka.cluster.{ProtocolType, EndPoint, Broker} +import kafka.utils.{Utils, TestUtils} import kafka.utils.TestUtils._ import kafka.server.{KafkaServer, KafkaConfig} import kafka.api.TopicMetadataRequest @@ -34,7 +34,7 @@ class TopicMetadataTest extends JUnit3Suite with ZooKeeperTestHarness { val props = createBrokerConfigs(1) val configs = props.map(p => new KafkaConfig(p)) private var server1: KafkaServer = null - val brokers = configs.map(c => new Broker(c.brokerId,c.hostName,c.port)) + val brokerEndPoints = configs.map(c => new Broker(c.brokerId,Utils.listenerListToEndPoints(c.listeners)).getBrokerEndPoint(ProtocolType.PLAINTEXT)) override def setUp() { super.setUp() @@ -67,7 +67,7 @@ class TopicMetadataTest extends JUnit3Suite with ZooKeeperTestHarness { val topic = "test" createTopic(zkClient, topic, numPartitions = 1, replicationFactor = 1, servers = Seq(server1)) - var topicsMetadata = ClientUtils.fetchTopicMetadata(Set(topic),brokers,"TopicMetadataTest-testBasicTopicMetadata", + var topicsMetadata = ClientUtils.fetchTopicMetadata(Set(topic),brokerEndPoints,"TopicMetadataTest-testBasicTopicMetadata", 2000,0).topicsMetadata assertEquals(ErrorMapping.NoError, topicsMetadata.head.errorCode) assertEquals(ErrorMapping.NoError, 
topicsMetadata.head.partitionsMetadata.head.errorCode) @@ -87,7 +87,7 @@ class TopicMetadataTest extends JUnit3Suite with ZooKeeperTestHarness { createTopic(zkClient, topic2, numPartitions = 1, replicationFactor = 1, servers = Seq(server1)) // issue metadata request with empty list of topics - var topicsMetadata = ClientUtils.fetchTopicMetadata(Set.empty, brokers, "TopicMetadataTest-testGetAllTopicMetadata", + var topicsMetadata = ClientUtils.fetchTopicMetadata(Set.empty, brokerEndPoints, "TopicMetadataTest-testGetAllTopicMetadata", 2000, 0).topicsMetadata assertEquals(ErrorMapping.NoError, topicsMetadata.head.errorCode) assertEquals(2, topicsMetadata.size) @@ -106,7 +106,7 @@ class TopicMetadataTest extends JUnit3Suite with ZooKeeperTestHarness { def testAutoCreateTopic { // auto create topic val topic = "testAutoCreateTopic" - var topicsMetadata = ClientUtils.fetchTopicMetadata(Set(topic),brokers,"TopicMetadataTest-testAutoCreateTopic", + var topicsMetadata = ClientUtils.fetchTopicMetadata(Set(topic),brokerEndPoints,"TopicMetadataTest-testAutoCreateTopic", 2000,0).topicsMetadata assertEquals(ErrorMapping.LeaderNotAvailableCode, topicsMetadata.head.errorCode) assertEquals("Expecting metadata only for 1 topic", 1, topicsMetadata.size) @@ -118,7 +118,7 @@ class TopicMetadataTest extends JUnit3Suite with ZooKeeperTestHarness { TestUtils.waitUntilMetadataIsPropagated(Seq(server1), topic, 0) // retry the metadata for the auto created topic - topicsMetadata = ClientUtils.fetchTopicMetadata(Set(topic),brokers,"TopicMetadataTest-testBasicTopicMetadata", + topicsMetadata = ClientUtils.fetchTopicMetadata(Set(topic),brokerEndPoints,"TopicMetadataTest-testBasicTopicMetadata", 2000,0).topicsMetadata assertEquals(ErrorMapping.NoError, topicsMetadata.head.errorCode) assertEquals(ErrorMapping.NoError, topicsMetadata.head.partitionsMetadata.head.errorCode) diff --git a/core/src/test/scala/unit/kafka/network/SocketServerTest.scala b/core/src/test/scala/unit/kafka/network/SocketServerTest.scala index 5f4d852..d5bebc6 100644 --- a/core/src/test/scala/unit/kafka/network/SocketServerTest.scala +++ b/core/src/test/scala/unit/kafka/network/SocketServerTest.scala @@ -19,6 +19,7 @@ package kafka.network; import java.net._ import java.io._ +import kafka.cluster.{ProtocolType, EndPoint} import org.junit._ import org.scalatest.junit.JUnitSuite import java.util.Random @@ -34,8 +35,7 @@ import kafka.utils.TestUtils class SocketServerTest extends JUnitSuite { val server: SocketServer = new SocketServer(0, - host = null, - port = kafka.utils.TestUtils.choosePort, + List(EndPoint(null,kafka.utils.TestUtils.choosePort,ProtocolType.PLAINTEXT)), numProcessorThreads = 1, maxQueuedRequests = 50, sendBufferSize = 300000, @@ -71,7 +71,7 @@ class SocketServerTest extends JUnitSuite { channel.sendResponse(new RequestChannel.Response(request.processor, request, send)) } - def connect() = new Socket("localhost", server.port) + def connect() = new Socket("localhost", server.endpoints.find(_.protocolType == ProtocolType.PLAINTEXT).get.port) @After def cleanup() { diff --git a/core/src/test/scala/unit/kafka/producer/SyncProducerTest.scala b/core/src/test/scala/unit/kafka/producer/SyncProducerTest.scala index d60d8e0..708fa99 100644 --- a/core/src/test/scala/unit/kafka/producer/SyncProducerTest.scala +++ b/core/src/test/scala/unit/kafka/producer/SyncProducerTest.scala @@ -21,6 +21,7 @@ import java.net.SocketTimeoutException import java.util.Properties import junit.framework.Assert import kafka.admin.AdminUtils +import 
kafka.cluster.ProtocolType import kafka.integration.KafkaServerTestHarness import kafka.message._ import kafka.server.KafkaConfig @@ -39,7 +40,8 @@ class SyncProducerTest extends JUnit3Suite with KafkaServerTestHarness { @Test def testReachableServer() { val server = servers.head - val props = TestUtils.getSyncProducerConfig(server.socketServer.port) + val port = server.socketServer.endpoints.find(_.protocolType==ProtocolType.PLAINTEXT).get.port + val props = TestUtils.getSyncProducerConfig(port) val producer = new SyncProducer(new SyncProducerConfig(props)) val firstStart = SystemTime.milliseconds @@ -74,7 +76,8 @@ class SyncProducerTest extends JUnit3Suite with KafkaServerTestHarness { @Test def testEmptyProduceRequest() { val server = servers.head - val props = TestUtils.getSyncProducerConfig(server.socketServer.port) + val port = server.socketServer.endpoints.find(_.protocolType==ProtocolType.PLAINTEXT).get.port + val props = TestUtils.getSyncProducerConfig(port) val correlationId = 0 val clientId = SyncProducerConfig.DefaultClientId @@ -91,7 +94,8 @@ class SyncProducerTest extends JUnit3Suite with KafkaServerTestHarness { @Test def testMessageSizeTooLarge() { val server = servers.head - val props = TestUtils.getSyncProducerConfig(server.socketServer.port) + val port = server.socketServer.endpoints.find(_.protocolType==ProtocolType.PLAINTEXT).get.port + val props = TestUtils.getSyncProducerConfig(port) val producer = new SyncProducer(new SyncProducerConfig(props)) TestUtils.createTopic(zkClient, "test", numPartitions = 1, replicationFactor = 1, servers = servers) @@ -118,8 +122,9 @@ class SyncProducerTest extends JUnit3Suite with KafkaServerTestHarness { @Test def testMessageSizeTooLargeWithAckZero() { val server = servers.head + val port = server.socketServer.endpoints.find(_.protocolType==ProtocolType.PLAINTEXT).get.port + val props = TestUtils.getSyncProducerConfig(port) - val props = TestUtils.getSyncProducerConfig(server.socketServer.port) props.put("request.required.acks", "0") val producer = new SyncProducer(new SyncProducerConfig(props)) @@ -145,7 +150,8 @@ class SyncProducerTest extends JUnit3Suite with KafkaServerTestHarness { @Test def testProduceCorrectlyReceivesResponse() { val server = servers.head - val props = TestUtils.getSyncProducerConfig(server.socketServer.port) + val port = server.socketServer.endpoints.find(_.protocolType==ProtocolType.PLAINTEXT).get.port + val props = TestUtils.getSyncProducerConfig(port) val producer = new SyncProducer(new SyncProducerConfig(props)) val messages = new ByteBufferMessageSet(NoCompressionCodec, new Message(messageBytes)) @@ -191,7 +197,8 @@ class SyncProducerTest extends JUnit3Suite with KafkaServerTestHarness { val timeoutMs = 500 val server = servers.head - val props = TestUtils.getSyncProducerConfig(server.socketServer.port) + val port = server.socketServer.endpoints.find(_.protocolType==ProtocolType.PLAINTEXT).get.port + val props = TestUtils.getSyncProducerConfig(port) val producer = new SyncProducer(new SyncProducerConfig(props)) val messages = new ByteBufferMessageSet(NoCompressionCodec, new Message(messageBytes)) @@ -217,7 +224,8 @@ class SyncProducerTest extends JUnit3Suite with KafkaServerTestHarness { @Test def testProduceRequestWithNoResponse() { val server = servers.head - val props = TestUtils.getSyncProducerConfig(server.socketServer.port) + val port = server.socketServer.endpoints.find(_.protocolType==ProtocolType.PLAINTEXT).get.port + val props = TestUtils.getSyncProducerConfig(port) val correlationId = 0 val 
clientId = SyncProducerConfig.DefaultClientId val ackTimeoutMs = SyncProducerConfig.DefaultAckTimeoutMs @@ -232,8 +240,9 @@ class SyncProducerTest extends JUnit3Suite with KafkaServerTestHarness { def testNotEnoughReplicas() { val topicName = "minisrtest" val server = servers.head + val port = server.socketServer.endpoints.find(_.protocolType==ProtocolType.PLAINTEXT).get.port + val props = TestUtils.getSyncProducerConfig(port) - val props = TestUtils.getSyncProducerConfig(server.socketServer.port) props.put("request.required.acks", "-1") val producer = new SyncProducer(new SyncProducerConfig(props)) diff --git a/core/src/test/scala/unit/kafka/server/AdvertiseBrokerTest.scala b/core/src/test/scala/unit/kafka/server/AdvertiseBrokerTest.scala index f0c4a56..49e3933 100644 --- a/core/src/test/scala/unit/kafka/server/AdvertiseBrokerTest.scala +++ b/core/src/test/scala/unit/kafka/server/AdvertiseBrokerTest.scala @@ -17,6 +17,7 @@ package kafka.server +import kafka.cluster.ProtocolType import org.scalatest.junit.JUnit3Suite import kafka.zk.ZooKeeperTestHarness import junit.framework.Assert._ @@ -31,9 +32,8 @@ class AdvertiseBrokerTest extends JUnit3Suite with ZooKeeperTestHarness { override def setUp() { super.setUp() val props = TestUtils.createBrokerConfig(brokerId, TestUtils.choosePort()) - props.put("advertised.host.name", advertisedHostName) - props.put("advertised.port", advertisedPort.toString) - + props.put("advertised.listeners",ProtocolType.PLAINTEXT.toString+"://"+advertisedHostName+":"+advertisedPort.toString) + server = TestUtils.createServer(new KafkaConfig(props)) } @@ -45,8 +45,9 @@ class AdvertiseBrokerTest extends JUnit3Suite with ZooKeeperTestHarness { def testBrokerAdvertiseToZK { val brokerInfo = ZkUtils.getBrokerInfo(zkClient, brokerId) - assertEquals(advertisedHostName, brokerInfo.get.host) - assertEquals(advertisedPort, brokerInfo.get.port) + val endpoint = brokerInfo.get.endPoints.find(_.protocolType == ProtocolType.PLAINTEXT).get + assertEquals(advertisedHostName, endpoint.host) + assertEquals(advertisedPort, endpoint.port) } } \ No newline at end of file diff --git a/core/src/test/scala/unit/kafka/server/KafkaConfigTest.scala b/core/src/test/scala/unit/kafka/server/KafkaConfigTest.scala index 2377abe..649e35e 100644 --- a/core/src/test/scala/unit/kafka/server/KafkaConfigTest.scala +++ b/core/src/test/scala/unit/kafka/server/KafkaConfigTest.scala @@ -17,10 +17,11 @@ package kafka.server +import kafka.cluster.ProtocolType import org.junit.Test import junit.framework.Assert._ import org.scalatest.junit.JUnit3Suite -import kafka.utils.TestUtils +import kafka.utils.{Utils, TestUtils} class KafkaConfigTest extends JUnit3Suite { @@ -91,12 +92,13 @@ class KafkaConfigTest extends JUnit3Suite { val hostName = "fake-host" val props = TestUtils.createBrokerConfig(0, port) - props.put("host.name", hostName) + props.put("listeners", "PLAINTEXT://"+hostName+":"+port) val serverConfig = new KafkaConfig(props) - - assertEquals(serverConfig.advertisedHostName, hostName) - assertEquals(serverConfig.advertisedPort, port) + val endpoints = Utils.listenerListToEndPoints(serverConfig.advertisedListeners) + val endpoint = endpoints.find(_.protocolType == ProtocolType.PLAINTEXT).get + assertEquals(endpoint.host, hostName) + assertEquals(endpoint.port, port) } @Test @@ -106,13 +108,14 @@ class KafkaConfigTest extends JUnit3Suite { val advertisedPort = 1234 val props = TestUtils.createBrokerConfig(0, port) - props.put("advertised.host.name", advertisedHostName) - props.put("advertised.port", 
advertisedPort.toString) + props.put("advertised.listeners", "PLAINTEXT://"+advertisedHostName+":"+advertisedPort.toString) val serverConfig = new KafkaConfig(props) + val endpoints = Utils.listenerListToEndPoints(serverConfig.advertisedListeners) + val endpoint = endpoints.find(_.protocolType == ProtocolType.PLAINTEXT).get - assertEquals(serverConfig.advertisedHostName, advertisedHostName) - assertEquals(serverConfig.advertisedPort, advertisedPort) + assertEquals(endpoint.host, advertisedHostName) + assertEquals(endpoint.port, advertisedPort) } @Test diff --git a/core/src/test/scala/unit/kafka/server/LeaderElectionTest.scala b/core/src/test/scala/unit/kafka/server/LeaderElectionTest.scala index c2ba07c..bc6bdd6 100644 --- a/core/src/test/scala/unit/kafka/server/LeaderElectionTest.scala +++ b/core/src/test/scala/unit/kafka/server/LeaderElectionTest.scala @@ -23,7 +23,7 @@ import kafka.utils.TestUtils._ import junit.framework.Assert._ import kafka.utils.{ZkUtils, Utils, TestUtils} import kafka.controller.{ControllerContext, LeaderIsrAndControllerEpoch, ControllerChannelManager} -import kafka.cluster.Broker +import kafka.cluster.{ProtocolType, Broker} import kafka.common.ErrorMapping import kafka.api._ @@ -118,7 +118,8 @@ class LeaderElectionTest extends JUnit3Suite with ZooKeeperTestHarness { // start another controller val controllerId = 2 val controllerConfig = new KafkaConfig(TestUtils.createBrokerConfig(controllerId, TestUtils.choosePort())) - val brokers = servers.map(s => new Broker(s.config.brokerId, "localhost", s.config.port)) + val endpoints = Utils.listenerListToEndPoints(controllerConfig.listeners) + val brokers = servers.map(s => new Broker(s.config.brokerId, endpoints)) val controllerContext = new ControllerContext(zkClient, 6000) controllerContext.liveBrokers = brokers.toSet val controllerChannelManager = new ControllerChannelManager(controllerContext, controllerConfig) diff --git a/core/src/test/scala/unit/kafka/server/LogOffsetTest.scala b/core/src/test/scala/unit/kafka/server/LogOffsetTest.scala index c06ee75..0476a59 100644 --- a/core/src/test/scala/unit/kafka/server/LogOffsetTest.scala +++ b/core/src/test/scala/unit/kafka/server/LogOffsetTest.scala @@ -197,7 +197,7 @@ class LogOffsetTest extends JUnit3Suite with ZooKeeperTestHarness { private def createBrokerConfig(nodeId: Int, port: Int): Properties = { val props = new Properties props.put("broker.id", nodeId.toString) - props.put("port", port.toString) + props.put("listeners", "PLAINTEXT://localhost:" + port.toString) props.put("log.dir", getLogDir.getAbsolutePath) props.put("log.flush.interval.messages", "1") props.put("enable.zookeeper", "false") diff --git a/core/src/test/scala/unit/kafka/utils/TestUtils.scala b/core/src/test/scala/unit/kafka/utils/TestUtils.scala index 94d0028..a7f48a9 100644 --- a/core/src/test/scala/unit/kafka/utils/TestUtils.scala +++ b/core/src/test/scala/unit/kafka/utils/TestUtils.scala @@ -24,6 +24,7 @@ import java.nio.channels._ import java.util.Random import java.util.Properties +import kafka.utils import org.apache.kafka.common.utils.Utils._ import collection.mutable.ListBuffer @@ -34,7 +35,7 @@ import kafka.server._ import kafka.producer._ import kafka.message._ import kafka.api._ -import kafka.cluster.Broker +import kafka.cluster.{ProtocolType, Broker} import kafka.consumer.{KafkaStream, ConsumerConfig} import kafka.serializer.{StringEncoder, DefaultEncoder, Encoder} import kafka.common.TopicAndPartition @@ -145,7 +146,10 @@ object TestUtils extends Logging { } def 
getBrokerListStrFromConfigs(configs: Seq[KafkaConfig]): String = { - configs.map(c => formatAddress(c.hostName, c.port)).mkString(",") + configs.map(c => { + val endpoint = Utils.listenerListToEndPoints(c.listeners).find(_.protocolType == ProtocolType.PLAINTEXT).get + formatAddress(endpoint.host, endpoint.port) + }).mkString(",") } /** @@ -155,8 +159,7 @@ object TestUtils extends Logging { enableControlledShutdown: Boolean = true): Properties = { val props = new Properties props.put("broker.id", nodeId.toString) - props.put("host.name", "localhost") - props.put("port", port.toString) + props.put("listeners","PLAINTEXT://localhost:"+port.toString) props.put("log.dir", TestUtils.tempDir().getAbsolutePath) props.put("zookeeper.connect", TestZKUtils.zookeeperConnect) props.put("replica.socket.timeout.ms", "1500") @@ -444,7 +447,7 @@ object TestUtils extends Logging { def createBrokersInZk(zkClient: ZkClient, ids: Seq[Int]): Seq[Broker] = { val brokers = ids.map(id => new Broker(id, "localhost", 6667)) - brokers.foreach(b => ZkUtils.registerBrokerInZk(zkClient, b.id, b.host, b.port, 6000, jmxPort = -1)) + brokers.foreach(b => ZkUtils.registerBrokerInZk(zkClient, b.id, b.endPoints, 6000, jmxPort = -1)) brokers } -- 1.9.3 (Apple Git-50) From 1a01f18779684114b8e688db6816391609817040 Mon Sep 17 00:00:00 2001 From: Gwen Shapira Date: Fri, 5 Dec 2014 12:21:09 -0800 Subject: [PATCH 2/9] changed topicmetadata to include brokerendpoints and fixed few unit tests --- core/src/main/scala/kafka/admin/AdminUtils.scala | 14 +++++++------- .../scala/kafka/api/ConsumerMetadataResponse.scala | 2 +- core/src/main/scala/kafka/api/TopicMetadata.scala | 22 +++++----------------- .../scala/kafka/api/TopicMetadataResponse.scala | 6 +----- core/src/main/scala/kafka/cluster/Broker.scala | 6 +----- .../main/scala/kafka/cluster/BrokerEndPoint.scala | 20 ++++---------------- .../kafka/consumer/ConsumerFetcherManager.scala | 6 +++--- .../main/scala/kafka/javaapi/TopicMetadata.scala | 6 +++--- .../main/scala/kafka/producer/ProducerPool.scala | 4 ++-- .../kafka/server/AbstractFetcherManager.scala | 6 +++--- core/src/main/scala/kafka/server/KafkaApis.scala | 2 +- .../main/scala/kafka/server/MetadataCache.scala | 22 ++++++++++++++-------- .../scala/kafka/server/ReplicaFetcherManager.scala | 4 ++-- .../scala/kafka/server/ReplicaFetcherThread.scala | 6 +++--- .../main/scala/kafka/server/ReplicaManager.scala | 2 +- .../main/scala/kafka/tools/GetOffsetShell.scala | 4 ++-- .../scala/kafka/tools/SimpleConsumerShell.scala | 18 +++++++++--------- core/src/test/resources/log4j.properties | 4 ++-- .../kafka/api/ProducerFailureHandlingTest.scala | 3 +-- .../api/RequestResponseSerializationTest.scala | 12 +++++++----- .../test/scala/unit/kafka/cluster/BrokerTest.scala | 1 - .../unit/kafka/producer/AsyncProducerTest.scala | 8 ++++---- .../unit/kafka/server/LeaderElectionTest.scala | 3 +-- 23 files changed, 77 insertions(+), 104 deletions(-) diff --git a/core/src/main/scala/kafka/admin/AdminUtils.scala b/core/src/main/scala/kafka/admin/AdminUtils.scala index d06775c..7e57b8c 100644 --- a/core/src/main/scala/kafka/admin/AdminUtils.scala +++ b/core/src/main/scala/kafka/admin/AdminUtils.scala @@ -294,7 +294,7 @@ object AdminUtils extends Logging { - private def fetchTopicMetadataFromZk(topic: String, zkClient: ZkClient, cachedBrokerInfo: mutable.HashMap[Int, Broker]): TopicMetadata = { + private def fetchTopicMetadataFromZk(topic: String, zkClient: ZkClient, cachedBrokerInfo: mutable.HashMap[Int, Broker], protocol: ProtocolType = 
ProtocolType.PLAINTEXT): TopicMetadata = { if(ZkUtils.pathExists(zkClient, ZkUtils.getTopicPath(topic))) { val topicPartitionAssignment = ZkUtils.getPartitionAssignmentForTopics(zkClient, List(topic)).get(topic).get val sortedPartitions = topicPartitionAssignment.toList.sortWith((m1, m2) => m1._1 < m2._1) @@ -305,22 +305,22 @@ object AdminUtils extends Logging { val leader = ZkUtils.getLeaderForPartition(zkClient, topic, partition) debug("replicas = " + replicas + ", in sync replicas = " + inSyncReplicas + ", leader = " + leader) - var leaderInfo: Option[Broker] = None - var replicaInfo: Seq[Broker] = Nil - var isrInfo: Seq[Broker] = Nil + var leaderInfo: Option[BrokerEndpoint] = None + var replicaInfo: Seq[BrokerEndpoint] = Nil + var isrInfo: Seq[BrokerEndpoint] = Nil try { leaderInfo = leader match { case Some(l) => try { - Some(getBrokerInfoFromCache(zkClient, cachedBrokerInfo, List(l)).head) + Some(getBrokerInfoFromCache(zkClient, cachedBrokerInfo, List(l)).head.getBrokerEndPoint(protocol)) } catch { case e: Throwable => throw new LeaderNotAvailableException("Leader not available for partition [%s,%d]".format(topic, partition), e) } case None => throw new LeaderNotAvailableException("No leader exists for partition " + partition) } try { - replicaInfo = getBrokerInfoFromCache(zkClient, cachedBrokerInfo, replicas.map(id => id.toInt)) - isrInfo = getBrokerInfoFromCache(zkClient, cachedBrokerInfo, inSyncReplicas) + replicaInfo = getBrokerInfoFromCache(zkClient, cachedBrokerInfo, replicas.map(id => id.toInt)).map(_.getBrokerEndPoint(protocol)) + isrInfo = getBrokerInfoFromCache(zkClient, cachedBrokerInfo, inSyncReplicas).map(_.getBrokerEndPoint(protocol)) } catch { case e: Throwable => throw new ReplicaNotAvailableException(e) } diff --git a/core/src/main/scala/kafka/api/ConsumerMetadataResponse.scala b/core/src/main/scala/kafka/api/ConsumerMetadataResponse.scala index d575232..05b4c9a 100644 --- a/core/src/main/scala/kafka/api/ConsumerMetadataResponse.scala +++ b/core/src/main/scala/kafka/api/ConsumerMetadataResponse.scala @@ -24,7 +24,7 @@ import kafka.common.ErrorMapping object ConsumerMetadataResponse { val CurrentVersion = 0 - private val NoBrokerOpt = Some(BrokerEndpoint(id = -1, host = "", port = -1, protocolType = ProtocolType.PLAINTEXT)) + private val NoBrokerOpt = Some(BrokerEndpoint(id = -1, host = "", port = -1)) def readFrom(buffer: ByteBuffer) = { val correlationId = buffer.getInt diff --git a/core/src/main/scala/kafka/api/TopicMetadata.scala b/core/src/main/scala/kafka/api/TopicMetadata.scala index b1ff35e..9aa9119 100644 --- a/core/src/main/scala/kafka/api/TopicMetadata.scala +++ b/core/src/main/scala/kafka/api/TopicMetadata.scala @@ -28,7 +28,7 @@ object TopicMetadata { val NoLeaderNodeId = -1 - def readFrom(buffer: ByteBuffer, brokers: Map[Int, Broker]): TopicMetadata = { + def readFrom(buffer: ByteBuffer, brokers: Map[Int, BrokerEndpoint]): TopicMetadata = { val errorCode = readShortInRange(buffer, "error code", (-1, Short.MaxValue)) val topic = readShortString(buffer) val numPartitions = readIntInRange(buffer, "number of partitions", (0, Int.MaxValue)) @@ -89,7 +89,7 @@ case class TopicMetadata(topic: String, partitionsMetadata: Seq[PartitionMetadat object PartitionMetadata { - def readFrom(buffer: ByteBuffer, brokers: Map[Int, Broker]): PartitionMetadata = { + def readFrom(buffer: ByteBuffer, brokers: Map[Int, BrokerEndpoint]): PartitionMetadata = { val errorCode = readShortInRange(buffer, "error code", (-1, Short.MaxValue)) val partitionId = readIntInRange(buffer, 
"partition id", (0, Int.MaxValue)) /* partition id */ val leaderId = buffer.getInt @@ -110,23 +110,11 @@ object PartitionMetadata { } case class PartitionMetadata(partitionId: Int, - val leader: Option[Broker], - replicas: Seq[Broker], - isr: Seq[Broker] = Seq.empty, + val leader: Option[BrokerEndpoint], + replicas: Seq[BrokerEndpoint], + isr: Seq[BrokerEndpoint] = Seq.empty, errorCode: Short = ErrorMapping.NoError) extends Logging { - /*def this(partitionId: Int, - leader: Option[BrokerEndPoint], - replicas: Seq[BrokerEndPoint], - isr: Seq[BrokerEndPoint] = Seq.empty, - errorCode: Short = ErrorMapping.NoError) = { - this(partitionId, - Some(if (leader.isDefined) Broker.createBroker(leader.get) else Broker.noBroker()), - replicas.map(endpoint=>Broker.createBroker(endpoint)), - isr.map(endpoint=>Broker.createBroker(endpoint)), - errorCode) - }*/ - def sizeInBytes: Int = { 2 /* error code */ + 4 /* partition id */ + diff --git a/core/src/main/scala/kafka/api/TopicMetadataResponse.scala b/core/src/main/scala/kafka/api/TopicMetadataResponse.scala index db9f88e..70450be 100644 --- a/core/src/main/scala/kafka/api/TopicMetadataResponse.scala +++ b/core/src/main/scala/kafka/api/TopicMetadataResponse.scala @@ -26,11 +26,7 @@ object TopicMetadataResponse { val correlationId = buffer.getInt val brokerCount = buffer.getInt val brokers = (0 until brokerCount).map(_ => BrokerEndpoint.readFrom(buffer)) - /* The broker list we are using for the TopicMetadataResponse is a collection of end-points (so clients can use them for connections) - * But partitionMetadata has multiple use-cases, some of them seem to require actual brokers - * So converting endpoints to broker objects here to preserve the partitionMetadata as is. - * TODO: check if we can use endpoints in partitionMetadata */ - val brokerMap = brokers.map(b => (b.id, Broker.createBroker(b))).toMap + val brokerMap = brokers.map(b => (b.id, b)).toMap val topicCount = buffer.getInt val topicsMetadata = (0 until topicCount).map(_ => TopicMetadata.readFrom(buffer, brokerMap)) new TopicMetadataResponse(brokers, topicsMetadata, correlationId) diff --git a/core/src/main/scala/kafka/cluster/Broker.scala b/core/src/main/scala/kafka/cluster/Broker.scala index 684bc32..a84a34e 100644 --- a/core/src/main/scala/kafka/cluster/Broker.scala +++ b/core/src/main/scala/kafka/cluster/Broker.scala @@ -67,10 +67,6 @@ object Broker { } } - def createBroker(endPoint: BrokerEndpoint) = { - Broker(endPoint.id,List(EndPoint(endPoint.host, endPoint.port,endPoint.protocolType))) - } - def noBroker() = { Broker(-1,List()) } @@ -121,7 +117,7 @@ case class Broker(id: Int, endPoints: Seq[EndPoint]) { def getBrokerEndPoint(protocolType: ProtocolType): BrokerEndpoint = { val endpoint = endPoints.map((endpoint)=>(endpoint.protocolType,endpoint)).toMap.get(protocolType) endpoint match { - case Some(endpoint) => new BrokerEndpoint(id,endpoint.host,endpoint.port,endpoint.protocolType) + case Some(endpoint) => new BrokerEndpoint(id,endpoint.host,endpoint.port) case None => throw new BrokerEndPointNotAvailableException("End point %s not found for broker %d".format(protocolType,id)) } diff --git a/core/src/main/scala/kafka/cluster/BrokerEndPoint.scala b/core/src/main/scala/kafka/cluster/BrokerEndPoint.scala index bf6f406..90ea612 100644 --- a/core/src/main/scala/kafka/cluster/BrokerEndPoint.scala +++ b/core/src/main/scala/kafka/cluster/BrokerEndPoint.scala @@ -9,7 +9,7 @@ import kafka.cluster.ProtocolType._ object BrokerEndpoint { def createBrokerEndPoint(brokerId: Int, connectionString: 
String): BrokerEndpoint = { val endPoint = EndPoint.createEndPoint(connectionString) - new BrokerEndpoint(brokerId,endPoint.host,endPoint.port.toInt,endPoint.protocolType) + new BrokerEndpoint(brokerId,endPoint.host,endPoint.port.toInt) } /** @@ -23,28 +23,16 @@ object BrokerEndpoint { val brokerId = buffer.getInt() val host = readShortString(buffer) val port = buffer.getInt() - BrokerEndpoint(brokerId,host,port,null) + BrokerEndpoint(brokerId,host,port) } } // Utility class, representing a particular method of connecting to a broker // Mostly to be used by clients // This is not a broker and is not stored in ZooKeeper -case class BrokerEndpoint(id: Int, host: String, port: Int, protocolType: ProtocolType) { +case class BrokerEndpoint(id: Int, host: String, port: Int) { - def connectionString(): String = formatEndpoint(host,port,protocolType) - - /** - * Formats broker endpoint as "channel://host:port" address string, - * surrounding IPv6 addresses with braces '[', ']' - * @param host hostname - * @param port port number - * @param protocolType String representing channel type (plain, SSL, KRB) - * @return address string - */ - def formatEndpoint(host: String, port: Integer, protocolType: ProtocolType): String = { - return protocolType + "://" + formatAddress(host, port) - } + def connectionString(): String = formatAddress(host,port) def writeTo(buffer: ByteBuffer): Unit = { buffer.putInt(id) diff --git a/core/src/main/scala/kafka/consumer/ConsumerFetcherManager.scala b/core/src/main/scala/kafka/consumer/ConsumerFetcherManager.scala index c6700d2..8c7accc 100644 --- a/core/src/main/scala/kafka/consumer/ConsumerFetcherManager.scala +++ b/core/src/main/scala/kafka/consumer/ConsumerFetcherManager.scala @@ -53,7 +53,7 @@ class ConsumerFetcherManager(private val consumerIdString: String, private class LeaderFinderThread(name: String) extends ShutdownableThread(name) { // thread responsible for adding the fetcher to the right broker when leader is available override def doWork() { - val leaderForPartitionsMap = new HashMap[TopicAndPartition, Broker] + val leaderForPartitionsMap = new HashMap[TopicAndPartition, BrokerEndpoint] lock.lock() try { while (noLeaderPartitionSet.isEmpty) { @@ -114,10 +114,10 @@ class ConsumerFetcherManager(private val consumerIdString: String, } } - override def createFetcherThread(fetcherId: Int, sourceBroker: Broker): AbstractFetcherThread = { + override def createFetcherThread(fetcherId: Int, sourceBroker: BrokerEndpoint): AbstractFetcherThread = { new ConsumerFetcherThread( "ConsumerFetcherThread-%s-%d-%d".format(consumerIdString, fetcherId, sourceBroker.id), - config, sourceBroker.getBrokerEndPoint(config.securityProtocol), partitionMap, this) + config, sourceBroker, partitionMap, this) } def startConnections(topicInfos: Iterable[PartitionTopicInfo], cluster: Cluster) { diff --git a/core/src/main/scala/kafka/javaapi/TopicMetadata.scala b/core/src/main/scala/kafka/javaapi/TopicMetadata.scala index 255a9f7..24defbc 100644 --- a/core/src/main/scala/kafka/javaapi/TopicMetadata.scala +++ b/core/src/main/scala/kafka/javaapi/TopicMetadata.scala @@ -52,17 +52,17 @@ class TopicMetadata(private val underlying: kafka.api.TopicMetadata) { class PartitionMetadata(private val underlying: kafka.api.PartitionMetadata) { def partitionId: Int = underlying.partitionId - def leader: Broker = { + def leader: BrokerEndpoint = { import kafka.javaapi.Implicits._ underlying.leader } - def replicas: java.util.List[Broker] = { + def replicas: java.util.List[BrokerEndpoint] = { import 
JavaConversions._ underlying.replicas } - def isr: java.util.List[Broker] = { + def isr: java.util.List[BrokerEndpoint] = { import JavaConversions._ underlying.isr } diff --git a/core/src/main/scala/kafka/producer/ProducerPool.scala b/core/src/main/scala/kafka/producer/ProducerPool.scala index 72686fc..1ea8752 100644 --- a/core/src/main/scala/kafka/producer/ProducerPool.scala +++ b/core/src/main/scala/kafka/producer/ProducerPool.scala @@ -48,8 +48,8 @@ class ProducerPool(val config: ProducerConfig) extends Logging { topicMetadata.foreach(tmd => { tmd.partitionsMetadata.foreach(pmd => { if(pmd.leader.isDefined) { - val endpoint = pmd.leader.get.endPoints.head - newBrokers += BrokerEndpoint(pmd.leader.get.id, endpoint.host,endpoint.port,config.securityProtocol) + val endpoint = pmd.leader.get + newBrokers += BrokerEndpoint(pmd.leader.get.id, endpoint.host,endpoint.port) } }) }) diff --git a/core/src/main/scala/kafka/server/AbstractFetcherManager.scala b/core/src/main/scala/kafka/server/AbstractFetcherManager.scala index 36f054e..1a5b0f9 100644 --- a/core/src/main/scala/kafka/server/AbstractFetcherManager.scala +++ b/core/src/main/scala/kafka/server/AbstractFetcherManager.scala @@ -68,7 +68,7 @@ abstract class AbstractFetcherManager(protected val name: String, clientId: Stri } // to be defined in subclass to create a specific fetcher - def createFetcherThread(fetcherId: Int, sourceBroker: Broker): AbstractFetcherThread + def createFetcherThread(fetcherId: Int, sourceBroker: BrokerEndpoint): AbstractFetcherThread def addFetcherForPartitions(partitionAndOffsets: Map[TopicAndPartition, BrokerAndInitialOffset]) { mapLock synchronized { @@ -126,6 +126,6 @@ abstract class AbstractFetcherManager(protected val name: String, clientId: Stri } } -case class BrokerAndFetcherId(broker: Broker, fetcherId: Int) +case class BrokerAndFetcherId(broker: BrokerEndpoint, fetcherId: Int) -case class BrokerAndInitialOffset(broker: Broker, initOffset: Long) \ No newline at end of file +case class BrokerAndInitialOffset(broker: BrokerEndpoint, initOffset: Long) \ No newline at end of file diff --git a/core/src/main/scala/kafka/server/KafkaApis.scala b/core/src/main/scala/kafka/server/KafkaApis.scala index 9b3697c..4a7472a 100644 --- a/core/src/main/scala/kafka/server/KafkaApis.scala +++ b/core/src/main/scala/kafka/server/KafkaApis.scala @@ -424,7 +424,7 @@ class KafkaApis(val requestChannel: RequestChannel, val response = offsetsTopicMetadata.partitionsMetadata.find(_.partitionId == partition).map { partitionMetadata => partitionMetadata.leader.map { leader => - ConsumerMetadataResponse(Some(leader.getBrokerEndPoint(securityProtocol)), ErrorMapping.NoError, consumerMetadataRequest.correlationId) + ConsumerMetadataResponse(Some(leader), ErrorMapping.NoError, consumerMetadataRequest.correlationId) }.getOrElse(errorResponse) }.getOrElse(errorResponse) diff --git a/core/src/main/scala/kafka/server/MetadataCache.scala b/core/src/main/scala/kafka/server/MetadataCache.scala index bf81a1a..3df176c 100644 --- a/core/src/main/scala/kafka/server/MetadataCache.scala +++ b/core/src/main/scala/kafka/server/MetadataCache.scala @@ -17,9 +17,12 @@ package kafka.server +import kafka.cluster.ProtocolType.ProtocolType +import kafka.cluster.ProtocolType.ProtocolType + import scala.collection.{Seq, Set, mutable} import kafka.api._ -import kafka.cluster.Broker +import kafka.cluster.{BrokerEndpoint, ProtocolType, Broker} import java.util.concurrent.locks.ReentrantReadWriteLock import kafka.utils.Utils._ import kafka.common.{ErrorMapping, 
ReplicaNotAvailableException, LeaderNotAvailableException} @@ -36,7 +39,7 @@ private[server] class MetadataCache { private var aliveBrokers: Map[Int, Broker] = Map() private val partitionMetadataLock = new ReentrantReadWriteLock() - def getTopicMetadata(topics: Set[String]) = { + def getTopicMetadata(topics: Set[String], protocol: ProtocolType = ProtocolType.PLAINTEXT) = { val isAllTopics = topics.isEmpty val topicsRequested = if(isAllTopics) cache.keySet else topics val topicResponses: mutable.ListBuffer[TopicMetadata] = new mutable.ListBuffer[TopicMetadata] @@ -47,18 +50,21 @@ private[server] class MetadataCache { val partitionMetadata = partitionStateInfos.map { case (partitionId, partitionState) => val replicas = partitionState.allReplicas - val replicaInfo: Seq[Broker] = replicas.map(aliveBrokers.getOrElse(_, null)).filter(_ != null).toSeq - var leaderInfo: Option[Broker] = None - var isrInfo: Seq[Broker] = Nil + val replicaInfo: Seq[BrokerEndpoint] = replicas.map(aliveBrokers.getOrElse(_, null)).filter(_ != null).toSeq.map(_.getBrokerEndPoint(protocol)) + var leaderInfo: Option[BrokerEndpoint] = None + var leaderBrokerInfo: Option[Broker] = None + var isrInfo: Seq[BrokerEndpoint] = Nil val leaderIsrAndEpoch = partitionState.leaderIsrAndControllerEpoch val leader = leaderIsrAndEpoch.leaderAndIsr.leader val isr = leaderIsrAndEpoch.leaderAndIsr.isr val topicPartition = TopicAndPartition(topic, partitionId) try { - leaderInfo = aliveBrokers.get(leader) - if (!leaderInfo.isDefined) + leaderBrokerInfo = aliveBrokers.get(leader) + if (!leaderBrokerInfo.isDefined) throw new LeaderNotAvailableException("Leader not available for %s".format(topicPartition)) - isrInfo = isr.map(aliveBrokers.getOrElse(_, null)).filter(_ != null) + else + leaderInfo = Some(leaderBrokerInfo.get.getBrokerEndPoint(protocol)) + isrInfo = isr.map(aliveBrokers.getOrElse(_, null)).filter(_ != null).map(_.getBrokerEndPoint(protocol)) if (replicaInfo.size < replicas.size) throw new ReplicaNotAvailableException("Replica information not available for following brokers: " + replicas.filterNot(replicaInfo.map(_.id).contains(_)).mkString(",")) diff --git a/core/src/main/scala/kafka/server/ReplicaFetcherManager.scala b/core/src/main/scala/kafka/server/ReplicaFetcherManager.scala index 351dbba..4d0f998 100644 --- a/core/src/main/scala/kafka/server/ReplicaFetcherManager.scala +++ b/core/src/main/scala/kafka/server/ReplicaFetcherManager.scala @@ -17,13 +17,13 @@ package kafka.server -import kafka.cluster.Broker +import kafka.cluster.{BrokerEndpoint, Broker} class ReplicaFetcherManager(private val brokerConfig: KafkaConfig, private val replicaMgr: ReplicaManager) extends AbstractFetcherManager("ReplicaFetcherManager on broker " + brokerConfig.brokerId, "Replica", brokerConfig.numReplicaFetchers) { - override def createFetcherThread(fetcherId: Int, sourceBroker: Broker): AbstractFetcherThread = { + override def createFetcherThread(fetcherId: Int, sourceBroker: BrokerEndpoint): AbstractFetcherThread = { new ReplicaFetcherThread("ReplicaFetcherThread-%d-%d".format(fetcherId, sourceBroker.id), sourceBroker, brokerConfig, replicaMgr) } diff --git a/core/src/main/scala/kafka/server/ReplicaFetcherThread.scala b/core/src/main/scala/kafka/server/ReplicaFetcherThread.scala index 5cf4cfd..b48e65e 100644 --- a/core/src/main/scala/kafka/server/ReplicaFetcherThread.scala +++ b/core/src/main/scala/kafka/server/ReplicaFetcherThread.scala @@ -18,19 +18,19 @@ package kafka.server import kafka.admin.AdminUtils -import kafka.cluster.Broker +import 
kafka.cluster.{BrokerEndpoint, Broker} import kafka.log.LogConfig import kafka.message.ByteBufferMessageSet import kafka.api.{OffsetRequest, FetchResponsePartitionData} import kafka.common.{KafkaStorageException, TopicAndPartition} class ReplicaFetcherThread(name:String, - sourceBroker: Broker, + sourceBroker: BrokerEndpoint, brokerConfig: KafkaConfig, replicaMgr: ReplicaManager) extends AbstractFetcherThread(name = name, clientId = name, - sourceBroker = sourceBroker.getBrokerEndPoint(brokerConfig.securityProtocol), + sourceBroker = sourceBroker, socketTimeout = brokerConfig.replicaSocketTimeoutMs, socketBufferSize = brokerConfig.replicaSocketReceiveBufferBytes, fetchSize = brokerConfig.replicaFetchMaxBytes, diff --git a/core/src/main/scala/kafka/server/ReplicaManager.scala b/core/src/main/scala/kafka/server/ReplicaManager.scala index e58fbb9..e7ad02b 100644 --- a/core/src/main/scala/kafka/server/ReplicaManager.scala +++ b/core/src/main/scala/kafka/server/ReplicaManager.scala @@ -678,7 +678,7 @@ class ReplicaManager(val config: KafkaConfig, // we do not need to check if the leader exists again since this has been done at the beginning of this process val partitionsToMakeFollowerWithLeaderAndOffset = partitionsToMakeFollower.map(partition => new TopicAndPartition(partition) -> BrokerAndInitialOffset( - leaders.find(_.id == partition.leaderReplicaIdOpt.get).get, + leaders.find(_.id == partition.leaderReplicaIdOpt.get).get.getBrokerEndPoint(config.securityProtocol), partition.getReplica().get.logEndOffset.messageOffset)).toMap replicaFetcherManager.addFetcherForPartitions(partitionsToMakeFollowerWithLeaderAndOffset) diff --git a/core/src/main/scala/kafka/tools/GetOffsetShell.scala b/core/src/main/scala/kafka/tools/GetOffsetShell.scala index 46078b7..1596516 100644 --- a/core/src/main/scala/kafka/tools/GetOffsetShell.scala +++ b/core/src/main/scala/kafka/tools/GetOffsetShell.scala @@ -94,8 +94,8 @@ object GetOffsetShell { case Some(metadata) => metadata.leader match { case Some(leader) => - val consumer = new SimpleConsumer(leader.getBrokerEndPoint(ProtocolType.PLAINTEXT).host, - leader.getBrokerEndPoint(ProtocolType.PLAINTEXT).port, 10000, 100000, clientId) + val consumer = new SimpleConsumer(leader.host, + leader.port, 10000, 100000, clientId) val topicAndPartition = TopicAndPartition(topic, partitionId) val request = OffsetRequest(Map(topicAndPartition -> PartitionOffsetRequestInfo(time, nOffsets))) val offsets = consumer.getOffsetsBefore(request).partitionErrorAndOffsets(topicAndPartition).offsets diff --git a/core/src/main/scala/kafka/tools/SimpleConsumerShell.scala b/core/src/main/scala/kafka/tools/SimpleConsumerShell.scala index 77c4900..ac86762 100644 --- a/core/src/main/scala/kafka/tools/SimpleConsumerShell.scala +++ b/core/src/main/scala/kafka/tools/SimpleConsumerShell.scala @@ -22,7 +22,7 @@ import kafka.utils._ import kafka.consumer._ import kafka.client.ClientUtils import kafka.api.{OffsetRequest, FetchRequestBuilder, Request} -import kafka.cluster.{ProtocolType, Broker} +import kafka.cluster.{BrokerEndpoint, ProtocolType, Broker} import scala.collection.JavaConversions._ import kafka.common.TopicAndPartition @@ -142,8 +142,8 @@ object SimpleConsumerShell extends Logging { } // validating replica id and initializing target broker - var fetchTargetBroker: Broker = null - var replicaOpt: Option[Broker] = null + var fetchTargetBroker: BrokerEndpoint = null + var replicaOpt: Option[BrokerEndpoint] = null if(replicaId == UseLeaderReplica) { replicaOpt = partitionMetadataOpt.get.leader 
if(!replicaOpt.isDefined) { @@ -167,8 +167,8 @@ object SimpleConsumerShell extends Logging { System.exit(1) } if (startingOffset < 0) { - val simpleConsumer = new SimpleConsumer(fetchTargetBroker.getBrokerEndPoint(ProtocolType.PLAINTEXT).host, - fetchTargetBroker.getBrokerEndPoint(ProtocolType.PLAINTEXT).port, + val simpleConsumer = new SimpleConsumer(fetchTargetBroker.host, + fetchTargetBroker.port, ConsumerConfig.SocketTimeout, ConsumerConfig.SocketBufferSize, clientId) try { @@ -191,10 +191,10 @@ object SimpleConsumerShell extends Logging { val replicaString = if(replicaId > 0) "leader" else "replica" info("Starting simple consumer shell to partition [%s, %d], %s [%d], host and port: [%s, %d], from offset [%d]" .format(topic, partitionId, replicaString, replicaId, - fetchTargetBroker.getBrokerEndPoint(ProtocolType.PLAINTEXT).host, - fetchTargetBroker.getBrokerEndPoint(ProtocolType.PLAINTEXT).port, startingOffset)) - val simpleConsumer = new SimpleConsumer(fetchTargetBroker.getBrokerEndPoint(ProtocolType.PLAINTEXT).host, - fetchTargetBroker.getBrokerEndPoint(ProtocolType.PLAINTEXT).port, + fetchTargetBroker.host, + fetchTargetBroker.port, startingOffset)) + val simpleConsumer = new SimpleConsumer(fetchTargetBroker.host, + fetchTargetBroker.port, 10000, 64*1024, clientId) val thread = Utils.newThread("kafka-simpleconsumer-shell", new Runnable() { def run() { diff --git a/core/src/test/resources/log4j.properties b/core/src/test/resources/log4j.properties index 1b7d5d8..1883f97 100644 --- a/core/src/test/resources/log4j.properties +++ b/core/src/test/resources/log4j.properties @@ -18,8 +18,8 @@ log4j.appender.stdout=org.apache.log4j.ConsoleAppender log4j.appender.stdout.layout=org.apache.log4j.PatternLayout log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c:%L)%n -log4j.logger.kafka=ERROR -log4j.logger.org.apache.kafka=ERROR +log4j.logger.kafka=TRACE +log4j.logger.org.apache.kafka=TRACE # zkclient can be verbose, during debugging it is common to adjust is separately log4j.logger.org.I0Itec.zkclient.ZkClient=WARN diff --git a/core/src/test/scala/integration/kafka/api/ProducerFailureHandlingTest.scala b/core/src/test/scala/integration/kafka/api/ProducerFailureHandlingTest.scala index 66c6c4e..8bbd7c0 100644 --- a/core/src/test/scala/integration/kafka/api/ProducerFailureHandlingTest.scala +++ b/core/src/test/scala/integration/kafka/api/ProducerFailureHandlingTest.scala @@ -267,8 +267,7 @@ class ProducerFailureHandlingTest extends KafkaServerTestHarness { server.shutdown() server.awaitShutdown() server.startup - - Thread.sleep(2000) + Thread.sleep(5000) } // Make sure the producer do not see any exception diff --git a/core/src/test/scala/unit/kafka/api/RequestResponseSerializationTest.scala b/core/src/test/scala/unit/kafka/api/RequestResponseSerializationTest.scala index d69d02a..ea70e2c 100644 --- a/core/src/test/scala/unit/kafka/api/RequestResponseSerializationTest.scala +++ b/core/src/test/scala/unit/kafka/api/RequestResponseSerializationTest.scala @@ -83,10 +83,12 @@ object SerializationTestUtils { private val brokers = List(new Broker(0, List(EndPoint("localhost", 1011, ProtocolType.PLAINTEXT))), new Broker(1, List(EndPoint("localhost", 1012, ProtocolType.PLAINTEXT))), new Broker(2, List(EndPoint("localhost", 1013, ProtocolType.PLAINTEXT)))) - private val partitionMetaData0 = new PartitionMetadata(0, Some(brokers.head), replicas = brokers, isr = brokers, errorCode = 0) - private val partitionMetaData1 = new PartitionMetadata(1, Some(brokers.head), replicas = brokers, isr = 
brokers.tail, errorCode = 1) - private val partitionMetaData2 = new PartitionMetadata(2, Some(brokers.head), replicas = brokers, isr = brokers, errorCode = 2) - private val partitionMetaData3 = new PartitionMetadata(3, Some(brokers.head), replicas = brokers, isr = brokers.tail.tail, errorCode = 3) + private val brokerEndpoints = brokers.map(_.getBrokerEndPoint(ProtocolType.PLAINTEXT)) + + private val partitionMetaData0 = new PartitionMetadata(0, Some(brokerEndpoints.head), replicas = brokerEndpoints, isr = brokerEndpoints, errorCode = 0) + private val partitionMetaData1 = new PartitionMetadata(1, Some(brokerEndpoints.head), replicas = brokerEndpoints, isr = brokerEndpoints.tail, errorCode = 1) + private val partitionMetaData2 = new PartitionMetadata(2, Some(brokerEndpoints.head), replicas = brokerEndpoints, isr = brokerEndpoints, errorCode = 2) + private val partitionMetaData3 = new PartitionMetadata(3, Some(brokerEndpoints.head), replicas = brokerEndpoints, isr = brokerEndpoints.tail.tail, errorCode = 3) private val partitionMetaDataSeq = Seq(partitionMetaData0, partitionMetaData1, partitionMetaData2, partitionMetaData3) private val topicmetaData1 = new TopicMetadata(topic1, partitionMetaDataSeq) private val topicmetaData2 = new TopicMetadata(topic2, partitionMetaDataSeq) @@ -150,7 +152,7 @@ object SerializationTestUtils { } def createTestTopicMetadataResponse: TopicMetadataResponse = { - new TopicMetadataResponse(List(BrokerEndpoint(1,"localhost",9092,null)), Seq(topicmetaData1, topicmetaData2), 1) + new TopicMetadataResponse(brokers.map(_.getBrokerEndPoint(ProtocolType.PLAINTEXT)).toVector, Seq(topicmetaData1, topicmetaData2), 1) } def createTestOffsetCommitRequestV1: OffsetCommitRequest = { diff --git a/core/src/test/scala/unit/kafka/cluster/BrokerTest.scala b/core/src/test/scala/unit/kafka/cluster/BrokerTest.scala index 289d87b..03c6522 100644 --- a/core/src/test/scala/unit/kafka/cluster/BrokerTest.scala +++ b/core/src/test/scala/unit/kafka/cluster/BrokerTest.scala @@ -85,7 +85,6 @@ class BrokerTest extends JUnit3Suite with Logging { val endpoint = BrokerEndpoint.createBrokerEndPoint(1,connectionString) assert(endpoint.host == "localhost") assert(endpoint.port == 9092) - assert(endpoint.protocolType == ProtocolType.PLAINTEXT) } diff --git a/core/src/test/scala/unit/kafka/producer/AsyncProducerTest.scala b/core/src/test/scala/unit/kafka/producer/AsyncProducerTest.scala index 1db6ac3..450aa59 100644 --- a/core/src/test/scala/unit/kafka/producer/AsyncProducerTest.scala +++ b/core/src/test/scala/unit/kafka/producer/AsyncProducerTest.scala @@ -23,7 +23,7 @@ import junit.framework.Assert._ import org.easymock.EasyMock import org.junit.Test import kafka.api._ -import kafka.cluster.Broker +import kafka.cluster.{BrokerEndpoint, Broker} import kafka.common._ import kafka.message._ import kafka.producer.async._ @@ -163,8 +163,8 @@ class AsyncProducerTest extends JUnit3Suite { val props = new Properties() props.put("metadata.broker.list", TestUtils.getBrokerListStrFromConfigs(configs)) - val broker1 = new Broker(0, "localhost", 9092) - val broker2 = new Broker(1, "localhost", 9093) + val broker1 = new BrokerEndpoint(0, "localhost", 9092) + val broker2 = new BrokerEndpoint(1, "localhost", 9093) // form expected partitions metadata val partition1Metadata = new PartitionMetadata(0, Some(broker1), List(broker1, broker2)) @@ -467,7 +467,7 @@ class AsyncProducerTest extends JUnit3Suite { } private def getTopicMetadata(topic: String, partition: Seq[Int], brokerId: Int, brokerHost: String, brokerPort: 
Int): TopicMetadata = {
-    val broker1 = new Broker(brokerId, brokerHost, brokerPort)
+    val broker1 = new BrokerEndpoint(brokerId, brokerHost, brokerPort)
     new TopicMetadata(topic, partition.map(new PartitionMetadata(_, Some(broker1), List(broker1))))
   }
 
diff --git a/core/src/test/scala/unit/kafka/server/LeaderElectionTest.scala b/core/src/test/scala/unit/kafka/server/LeaderElectionTest.scala
index bc6bdd6..79dff89 100644
--- a/core/src/test/scala/unit/kafka/server/LeaderElectionTest.scala
+++ b/core/src/test/scala/unit/kafka/server/LeaderElectionTest.scala
@@ -118,8 +118,7 @@ class LeaderElectionTest extends JUnit3Suite with ZooKeeperTestHarness {
     // start another controller
     val controllerId = 2
     val controllerConfig = new KafkaConfig(TestUtils.createBrokerConfig(controllerId, TestUtils.choosePort()))
-    val endpoints = Utils.listenerListToEndPoints(controllerConfig.listeners)
-    val brokers = servers.map(s => new Broker(s.config.brokerId, endpoints))
+    val brokers = servers.map(s => new Broker(s.config.brokerId, Utils.listenerListToEndPoints(s.config.listeners)))
     val controllerContext = new ControllerContext(zkClient, 6000)
     controllerContext.liveBrokers = brokers.toSet
     val controllerChannelManager = new ControllerChannelManager(controllerContext, controllerConfig)
-- 
1.9.3 (Apple Git-50)

From f3437ce5713f58b485698d5a50c1af64e40f41bc Mon Sep 17 00:00:00 2001
From: Gwen Shapira
Date: Sat, 6 Dec 2014 11:28:06 -0800
Subject: [PATCH 3/9] fixing systest and support for binding to default address

---
 config/server.properties                                |  4 +++-
 core/src/main/scala/kafka/cluster/EndPoint.scala        |  3 ++-
 core/src/main/scala/kafka/server/KafkaConfig.scala      |  2 +-
 core/src/test/scala/unit/kafka/KafkaConfigTest.scala    |  8 ++++----
 core/src/test/scala/unit/kafka/cluster/BrokerTest.scala | 12 +++++++++---
 system_test/utils/kafka_system_test_utils.py            |  1 +
 6 files changed, 20 insertions(+), 10 deletions(-)

diff --git a/config/server.properties b/config/server.properties
index b0e4496..7dbc86a 100644
--- a/config/server.properties
+++ b/config/server.properties
@@ -21,8 +21,10 @@ broker.id=0
 
 ############################# Socket Server Settings #############################
 
+listeners=PLAINTEXT://:9092
+
 # The port the socket server listens on
-port=9092
+#port=9092
 
 # Hostname the broker will bind to. If not set, the server will bind to all interfaces
 #host.name=localhost
diff --git a/core/src/main/scala/kafka/cluster/EndPoint.scala b/core/src/main/scala/kafka/cluster/EndPoint.scala
index 085b333..1b5fe8f 100644
--- a/core/src/main/scala/kafka/cluster/EndPoint.scala
+++ b/core/src/main/scala/kafka/cluster/EndPoint.scala
@@ -33,8 +33,9 @@ object EndPoint {
   }
 
   def createEndPoint(connectionString: String): EndPoint = {
-    val uriParseExp = """^(.*)://([0-9a-z\-.]+):([0-9]+)""".r
+    val uriParseExp = """^(.*)://([0-9a-z\-.]*):([0-9]+)""".r
     connectionString match {
+      case uriParseExp(protocol,"",port) => new EndPoint(null,port.toInt,ProtocolType.withName(protocol))
       case uriParseExp(protocol,host,port) => new EndPoint(host,port.toInt,ProtocolType.withName(protocol))
       case _ => throw new KafkaException("Unable to parse " + connectionString + " to a broker endpoint")
     }
diff --git a/core/src/main/scala/kafka/server/KafkaConfig.scala b/core/src/main/scala/kafka/server/KafkaConfig.scala
index 5a9bac7..0f42303 100644
--- a/core/src/main/scala/kafka/server/KafkaConfig.scala
+++ b/core/src/main/scala/kafka/server/KafkaConfig.scala
@@ -95,7 +95,7 @@ class KafkaConfig private (val props: VerifiableProperties) extends ZKConfig(pro
   /* Listener List - Comma-separated list of URIs we will listen on and their protocols.
    * Specify hostname as 0.0.0.0 to bind to all interfaces
    * Leave hostname empty to bind to default interface */
-  val listeners: String = props.getString("listeners", "PLAINTEXT://0.0.0.0:6667")
+  val listeners: String = props.getString("listeners", "PLAINTEXT://:6667")
 
   /* Listeners to publish to ZooKeeper for clients to use, if different than the listeners above.
    * In IaaS environments, this may need to be different from the interface to which the broker binds.
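
A note on the listener format patch 3 introduces: a listener string has the form protocol://host:port. The new default "PLAINTEXT://:6667" leaves the host empty, which createEndPoint now maps to a null host, meaning "bind to the default interface", while 0.0.0.0 still means "bind to all interfaces". The sketch below is illustrative only and not part of the patch (ListenerSketch and parseListener are invented names, and it returns an Option where the patch uses a null host); it reuses the exact regex added to EndPoint.createEndPoint above to show which forms are accepted:

    object ListenerSketch {
      // The same pattern patch 3 puts into EndPoint.createEndPoint: the host group is
      // ([0-9a-z\-.]*) rather than ([0-9a-z\-.]+), so an empty host is now accepted.
      private val uriParseExp = """^(.*)://([0-9a-z\-.]*):([0-9]+)""".r

      // Returns (protocol, Some(host) or None for "default interface", port).
      def parseListener(connectionString: String): (String, Option[String], Int) =
        connectionString match {
          case uriParseExp(protocol, "", port)   => (protocol, None, port.toInt)
          case uriParseExp(protocol, host, port) => (protocol, Some(host), port.toInt)
          case _ => throw new IllegalArgumentException("Unable to parse " + connectionString)
        }

      def main(args: Array[String]): Unit = {
        println(parseListener("PLAINTEXT://localhost:9092")) // (PLAINTEXT,Some(localhost),9092)
        println(parseListener("PLAINTEXT://:6667"))          // (PLAINTEXT,None,6667), the new default
      }
    }

Note that the host character class accepts neither ':' nor brackets, so IPv6 literals such as PLAINTEXT://[::1]:9092 do not parse at this point in the series; patches 5 and 6 below widen the pattern for exactly that reason.
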
diff --git a/core/src/test/scala/unit/kafka/KafkaConfigTest.scala b/core/src/test/scala/unit/kafka/KafkaConfigTest.scala
index 4d36b8b..bc4aef3 100644
--- a/core/src/test/scala/unit/kafka/KafkaConfigTest.scala
+++ b/core/src/test/scala/unit/kafka/KafkaConfigTest.scala
@@ -65,14 +65,14 @@ class KafkaTest {
     assertEquals(2, config2.brokerId)
 
     // We should be also able to set completely new property
-    val config3 = Kafka.getKafkaConfigFromArgs(Array(propertiesFile, "--override", "port=1987"))
+    val config3 = Kafka.getKafkaConfigFromArgs(Array(propertiesFile, "--override", "log.cleanup.policy=compact"))
     assertEquals(1, config3.brokerId)
-    assertEquals(1987, config3.port)
+    assertEquals("compact", config3.logCleanupPolicy)
 
     // We should be also able to set several properties
-    val config4 = Kafka.getKafkaConfigFromArgs(Array(propertiesFile, "--override", "port=1987", "--override", "broker.id=2"))
+    val config4 = Kafka.getKafkaConfigFromArgs(Array(propertiesFile, "--override", "log.cleanup.policy=compact", "--override", "broker.id=2"))
     assertEquals(2, config4.brokerId)
-    assertEquals(1987, config4.port)
+    assertEquals("compact", config4.logCleanupPolicy)
   }
 
   @Test(expected = classOf[ExitCalled])
diff --git a/core/src/test/scala/unit/kafka/cluster/BrokerTest.scala b/core/src/test/scala/unit/kafka/cluster/BrokerTest.scala
index 03c6522..7d59037 100644
--- a/core/src/test/scala/unit/kafka/cluster/BrokerTest.scala
+++ b/core/src/test/scala/unit/kafka/cluster/BrokerTest.scala
@@ -80,14 +80,20 @@ class BrokerTest extends JUnit3Suite with Logging {
   }
 
   @Test
-  def endpointFromURI() = {
-    val connectionString = "PLAINTEXT://localhost:9092"
-    val endpoint = BrokerEndpoint.createBrokerEndPoint(1,connectionString)
+  def testEndpointFromURI() = {
+    var connectionString = "PLAINTEXT://localhost:9092"
+    var endpoint = BrokerEndpoint.createBrokerEndPoint(1,connectionString)
     assert(endpoint.host == "localhost")
     assert(endpoint.port == 9092)
+    // also test for default bind
+    connectionString = "PLAINTEXT://:9092"
+    endpoint = BrokerEndpoint.createBrokerEndPoint(1,connectionString)
+    assert(endpoint.host == null)
+    assert(endpoint.port == 9092)
   }
 
+
 }
diff --git a/system_test/utils/kafka_system_test_utils.py b/system_test/utils/kafka_system_test_utils.py
index 41d511c..e4b5d80 100644
--- a/system_test/utils/kafka_system_test_utils.py
+++ b/system_test/utils/kafka_system_test_utils.py
@@ -436,6 +436,7 @@ def generate_overriden_props_files(testsuitePathname, testcaseEnv, systemTestEnv
         addedCSVConfig["kafka.metrics.polling.interval.secs"] = "5"
         addedCSVConfig["kafka.metrics.reporters"] = "kafka.metrics.KafkaCSVMetricsReporter"
         addedCSVConfig["kafka.csv.metrics.reporter.enabled"] = "true"
+        addedCSVConfig["listeners"] = "PLAINTEXT://:"+port
 
         if brokerVersion == "0.7":
             addedCSVConfig["brokerid"] = tcCfg["brokerid"]
-- 
1.9.3 (Apple Git-50)

From 6a18cd8c929aa1b6156f17b40e1511f33de0a424 Mon Sep 17 00:00:00 2001
From: Gwen Shapira
Date: Wed, 24 Dec 2014 21:34:01 -0800
Subject: [PATCH 4/9] fixed system tests

---
 clients/src/main/java/org/apache/kafka/common/utils/Utils.java |  6 +++---
 config/server.properties                                       |  2 +-
 core/src/main/scala/kafka/client/ClientUtils.scala             |  2 +-
 system_test/utils/kafka_system_test_utils.py                   | 10 +++++-----
 4 files changed, 10 insertions(+), 10 deletions(-)

diff --git a/clients/src/main/java/org/apache/kafka/common/utils/Utils.java b/clients/src/main/java/org/apache/kafka/common/utils/Utils.java
index 527dd0f..e6152f4 100644
--- a/clients/src/main/java/org/apache/kafka/common/utils/Utils.java
+++
b/clients/src/main/java/org/apache/kafka/common/utils/Utils.java @@ -24,7 +24,7 @@ import org.apache.kafka.common.KafkaException; public class Utils { - private static final Pattern HOST_PORT_PATTERN = Pattern.compile("\\[?(.+?)\\]?:(\\d+)"); + private static final Pattern HOST_PORT_PATTERN = Pattern.compile("^(.*)://([0-9a-z\\-.]*):([0-9]+)"); public static String NL = System.getProperty("line.separator"); @@ -288,7 +288,7 @@ public class Utils { */ public static String getHost(String address) { Matcher matcher = HOST_PORT_PATTERN.matcher(address); - return matcher.matches() ? matcher.group(1) : null; + return matcher.matches() ? matcher.group(2) : null; } /** @@ -298,7 +298,7 @@ public class Utils { */ public static Integer getPort(String address) { Matcher matcher = HOST_PORT_PATTERN.matcher(address); - return matcher.matches() ? Integer.parseInt(matcher.group(2)) : null; + return matcher.matches() ? Integer.parseInt(matcher.group(3)) : null; } /** diff --git a/config/server.properties b/config/server.properties index 7dbc86a..897ce78 100644 --- a/config/server.properties +++ b/config/server.properties @@ -21,7 +21,7 @@ broker.id=0 ############################# Socket Server Settings ############################# -listeners=PLAINTEXT://:9092 +listeners=PLAINTEXT://0.0.0.0:9092 # The port the socket server listens on #port=9092 diff --git a/core/src/main/scala/kafka/client/ClientUtils.scala b/core/src/main/scala/kafka/client/ClientUtils.scala index 5acc878..2d53350 100644 --- a/core/src/main/scala/kafka/client/ClientUtils.scala +++ b/core/src/main/scala/kafka/client/ClientUtils.scala @@ -102,7 +102,7 @@ object ClientUtils extends Logging{ val brokersStr = Utils.parseCsvList(brokerListStr) brokersStr.zipWithIndex.map { case (address, brokerId) => - BrokerEndpoint.createBrokerEndPoint(brokerId,protocolType.toString + "://" + address) + BrokerEndpoint.createBrokerEndPoint(brokerId,address) } } diff --git a/system_test/utils/kafka_system_test_utils.py b/system_test/utils/kafka_system_test_utils.py index e4b5d80..a0e82c2 100644 --- a/system_test/utils/kafka_system_test_utils.py +++ b/system_test/utils/kafka_system_test_utils.py @@ -386,14 +386,14 @@ def generate_overriden_props_files(testsuitePathname, testcaseEnv, systemTestEnv if clusterName == "source": if ( len(testcaseEnv.userDefinedEnvVarDict["sourceBrokerList"]) == 0 ): - testcaseEnv.userDefinedEnvVarDict["sourceBrokerList"] = hostname + ":" + port + testcaseEnv.userDefinedEnvVarDict["sourceBrokerList"] = "PLAINTEXT://" + hostname + ":" + port else: - testcaseEnv.userDefinedEnvVarDict["sourceBrokerList"] += "," + hostname + ":" + port + testcaseEnv.userDefinedEnvVarDict["sourceBrokerList"] += "," + "PLAINTEXT://" + hostname + ":" + port elif clusterName == "target": if ( len(testcaseEnv.userDefinedEnvVarDict["targetBrokerList"]) == 0 ): - testcaseEnv.userDefinedEnvVarDict["targetBrokerList"] = hostname + ":" + port + testcaseEnv.userDefinedEnvVarDict["targetBrokerList"] = "PLAINTEXT://" + hostname + ":" + port else: - testcaseEnv.userDefinedEnvVarDict["targetBrokerList"] += "," + hostname + ":" + port + testcaseEnv.userDefinedEnvVarDict["targetBrokerList"] += "," + "PLAINTEXT://" + hostname + ":" + port else: logger.error("Invalid cluster name: " + clusterName, extra=d) raise Exception("Invalid cluster name : " + clusterName) @@ -436,7 +436,7 @@ def generate_overriden_props_files(testsuitePathname, testcaseEnv, systemTestEnv addedCSVConfig["kafka.metrics.polling.interval.secs"] = "5" addedCSVConfig["kafka.metrics.reporters"] = 
"kafka.metrics.KafkaCSVMetricsReporter" addedCSVConfig["kafka.csv.metrics.reporter.enabled"] = "true" - addedCSVConfig["listeners"] = "PLAINTEXT://:"+port + addedCSVConfig["listeners"] = "PLAINTEXT://localhost:"+tcCfg["port"] if brokerVersion == "0.7": addedCSVConfig["brokerid"] = tcCfg["brokerid"] -- 1.9.3 (Apple Git-50) From 10f4a8958e4800204f8cebc96f69b01d01fe1714 Mon Sep 17 00:00:00 2001 From: Gwen Shapira Date: Wed, 24 Dec 2014 22:43:17 -0800 Subject: [PATCH 5/9] fix default address binding and ipv6 support --- .../main/java/org/apache/kafka/common/utils/Utils.java | 2 +- .../org/apache/kafka/common/utils/ClientUtilsTest.java | 8 ++++---- .../java/org/apache/kafka/common/utils/UtilsTest.java | 16 ++++++++-------- config/server.properties | 2 +- core/src/main/scala/kafka/server/KafkaHealthcheck.scala | 8 +++++++- 5 files changed, 21 insertions(+), 15 deletions(-) diff --git a/clients/src/main/java/org/apache/kafka/common/utils/Utils.java b/clients/src/main/java/org/apache/kafka/common/utils/Utils.java index e6152f4..bd38d23 100644 --- a/clients/src/main/java/org/apache/kafka/common/utils/Utils.java +++ b/clients/src/main/java/org/apache/kafka/common/utils/Utils.java @@ -24,7 +24,7 @@ import org.apache.kafka.common.KafkaException; public class Utils { - private static final Pattern HOST_PORT_PATTERN = Pattern.compile("^(.*)://([0-9a-z\\-.]*):([0-9]+)"); + private static final Pattern HOST_PORT_PATTERN = Pattern.compile("^(.*)://\\[?([0-9a-z\\-.:]*)\\]?:([0-9]+)"); public static String NL = System.getProperty("line.separator"); diff --git a/clients/src/test/java/org/apache/kafka/common/utils/ClientUtilsTest.java b/clients/src/test/java/org/apache/kafka/common/utils/ClientUtilsTest.java index 6e37ea5..c5d70b7 100644 --- a/clients/src/test/java/org/apache/kafka/common/utils/ClientUtilsTest.java +++ b/clients/src/test/java/org/apache/kafka/common/utils/ClientUtilsTest.java @@ -25,10 +25,10 @@ public class ClientUtilsTest { @Test public void testParseAndValidateAddresses() { - check("127.0.0.1:8000"); - check("mydomain.com:8080"); - check("[::1]:8000"); - check("[2001:db8:85a3:8d3:1319:8a2e:370:7348]:1234", "mydomain.com:10000"); + check("PLAINTEXT://127.0.0.1:8000"); + check("PLAINTEXT://mydomain.com:8080"); + check("PLAINTEXT://[::1]:8000"); + check("PLAINTEXT://[2001:db8:85a3:8d3:1319:8a2e:370:7348]:1234", "PLAINTEXT://mydomain.com:10000"); } @Test(expected = ConfigException.class) diff --git a/clients/src/test/java/org/apache/kafka/common/utils/UtilsTest.java b/clients/src/test/java/org/apache/kafka/common/utils/UtilsTest.java index a39fab5..ad6529e 100644 --- a/clients/src/test/java/org/apache/kafka/common/utils/UtilsTest.java +++ b/clients/src/test/java/org/apache/kafka/common/utils/UtilsTest.java @@ -27,18 +27,18 @@ public class UtilsTest { @Test public void testGetHost() { - assertEquals("127.0.0.1", getHost("127.0.0.1:8000")); - assertEquals("mydomain.com", getHost("mydomain.com:8080")); - assertEquals("::1", getHost("[::1]:1234")); - assertEquals("2001:db8:85a3:8d3:1319:8a2e:370:7348", getHost("[2001:db8:85a3:8d3:1319:8a2e:370:7348]:5678")); + assertEquals("127.0.0.1", getHost("PLAINTEXT://127.0.0.1:8000")); + assertEquals("mydomain.com", getHost("PLAINTEXT://mydomain.com:8080")); + assertEquals("::1", getHost("PLAINTEXT://[::1]:1234")); + assertEquals("2001:db8:85a3:8d3:1319:8a2e:370:7348", getHost("PLAINTEXT://[2001:db8:85a3:8d3:1319:8a2e:370:7348]:5678")); } @Test public void testGetPort() { - assertEquals(8000, getPort("127.0.0.1:8000").intValue()); - assertEquals(8080, 
getPort("mydomain.com:8080").intValue()); - assertEquals(1234, getPort("[::1]:1234").intValue()); - assertEquals(5678, getPort("[2001:db8:85a3:8d3:1319:8a2e:370:7348]:5678").intValue()); + assertEquals(8000, getPort("PLAINTEXT://127.0.0.1:8000").intValue()); + assertEquals(8080, getPort("PLAINTEXT://mydomain.com:8080").intValue()); + assertEquals(1234, getPort("PLAINTEXT://[::1]:1234").intValue()); + assertEquals(5678, getPort("PLAINTEXT://[2001:db8:85a3:8d3:1319:8a2e:370:7348]:5678").intValue()); } @Test diff --git a/config/server.properties b/config/server.properties index 897ce78..7dbc86a 100644 --- a/config/server.properties +++ b/config/server.properties @@ -21,7 +21,7 @@ broker.id=0 ############################# Socket Server Settings ############################# -listeners=PLAINTEXT://0.0.0.0:9092 +listeners=PLAINTEXT://:9092 # The port the socket server listens on #port=9092 diff --git a/core/src/main/scala/kafka/server/KafkaHealthcheck.scala b/core/src/main/scala/kafka/server/KafkaHealthcheck.scala index 3ee1256..00cc696 100644 --- a/core/src/main/scala/kafka/server/KafkaHealthcheck.scala +++ b/core/src/main/scala/kafka/server/KafkaHealthcheck.scala @@ -55,7 +55,13 @@ class KafkaHealthcheck(private val brokerId: Int, */ def register() { val jmxPort = System.getProperty("com.sun.management.jmxremote.port", "-1").toInt - ZkUtils.registerBrokerInZk(zkClient, brokerId, advertisedEndpoints, zkSessionTimeoutMs, jmxPort) + val updatedEndpoints = advertisedEndpoints.map(endpoint => + if (endpoint.host == null || endpoint.host.trim.isEmpty) + EndPoint(InetAddress.getLocalHost.getCanonicalHostName, endpoint.port, endpoint.protocolType) + else + endpoint + ) + ZkUtils.registerBrokerInZk(zkClient, brokerId, updatedEndpoints, zkSessionTimeoutMs, jmxPort) } /** -- 1.9.3 (Apple Git-50) From e7a77b3e838130857751b140379eca58b615d36c Mon Sep 17 00:00:00 2001 From: Gwen Shapira Date: Thu, 25 Dec 2014 09:43:26 -0800 Subject: [PATCH 6/9] fix some issues regarding endpoint parsing. Also, larger segments for systest make the validation much faster --- .../java/org/apache/kafka/common/utils/Utils.java | 6 +++--- .../apache/kafka/common/utils/ClientUtilsTest.java | 6 +++--- .../org/apache/kafka/common/utils/UtilsTest.java | 8 ++++---- .../main/scala/kafka/cluster/BrokerEndPoint.scala | 15 +++++++++++---- core/src/main/scala/kafka/cluster/EndPoint.scala | 5 ++++- .../test/scala/unit/kafka/cluster/BrokerTest.scala | 22 ++++++++++++++++++++-- .../testcase_1/testcase_1_properties.json | 6 +++--- 7 files changed, 48 insertions(+), 20 deletions(-) diff --git a/clients/src/main/java/org/apache/kafka/common/utils/Utils.java b/clients/src/main/java/org/apache/kafka/common/utils/Utils.java index bd38d23..23922cb 100644 --- a/clients/src/main/java/org/apache/kafka/common/utils/Utils.java +++ b/clients/src/main/java/org/apache/kafka/common/utils/Utils.java @@ -24,7 +24,7 @@ import org.apache.kafka.common.KafkaException; public class Utils { - private static final Pattern HOST_PORT_PATTERN = Pattern.compile("^(.*)://\\[?([0-9a-z\\-.:]*)\\]?:([0-9]+)"); + private static final Pattern HOST_PORT_PATTERN = Pattern.compile(".*?\\[?([0-9a-z\\-.:]*)\\]?:([0-9]+)"); public static String NL = System.getProperty("line.separator"); @@ -288,7 +288,7 @@ public class Utils { */ public static String getHost(String address) { Matcher matcher = HOST_PORT_PATTERN.matcher(address); - return matcher.matches() ? matcher.group(2) : null; + return matcher.matches() ? 
matcher.group(1) : null; } /** @@ -298,7 +298,7 @@ public class Utils { */ public static Integer getPort(String address) { Matcher matcher = HOST_PORT_PATTERN.matcher(address); - return matcher.matches() ? Integer.parseInt(matcher.group(3)) : null; + return matcher.matches() ? Integer.parseInt(matcher.group(2)) : null; } /** diff --git a/clients/src/test/java/org/apache/kafka/common/utils/ClientUtilsTest.java b/clients/src/test/java/org/apache/kafka/common/utils/ClientUtilsTest.java index c5d70b7..be66c87 100644 --- a/clients/src/test/java/org/apache/kafka/common/utils/ClientUtilsTest.java +++ b/clients/src/test/java/org/apache/kafka/common/utils/ClientUtilsTest.java @@ -25,10 +25,10 @@ public class ClientUtilsTest { @Test public void testParseAndValidateAddresses() { - check("PLAINTEXT://127.0.0.1:8000"); + check("127.0.0.1:8000"); check("PLAINTEXT://mydomain.com:8080"); - check("PLAINTEXT://[::1]:8000"); - check("PLAINTEXT://[2001:db8:85a3:8d3:1319:8a2e:370:7348]:1234", "PLAINTEXT://mydomain.com:10000"); + check("[::1]:8000"); + check("PLAINTEXT://[2001:db8:85a3:8d3:1319:8a2e:370:7348]:1234", "mydomain.com:10000"); } @Test(expected = ConfigException.class) diff --git a/clients/src/test/java/org/apache/kafka/common/utils/UtilsTest.java b/clients/src/test/java/org/apache/kafka/common/utils/UtilsTest.java index ad6529e..9a15d33 100644 --- a/clients/src/test/java/org/apache/kafka/common/utils/UtilsTest.java +++ b/clients/src/test/java/org/apache/kafka/common/utils/UtilsTest.java @@ -27,18 +27,18 @@ public class UtilsTest { @Test public void testGetHost() { - assertEquals("127.0.0.1", getHost("PLAINTEXT://127.0.0.1:8000")); + assertEquals("127.0.0.1", getHost("127.0.0.1:8000")); assertEquals("mydomain.com", getHost("PLAINTEXT://mydomain.com:8080")); - assertEquals("::1", getHost("PLAINTEXT://[::1]:1234")); + assertEquals("::1", getHost("[::1]:1234")); assertEquals("2001:db8:85a3:8d3:1319:8a2e:370:7348", getHost("PLAINTEXT://[2001:db8:85a3:8d3:1319:8a2e:370:7348]:5678")); } @Test public void testGetPort() { assertEquals(8000, getPort("PLAINTEXT://127.0.0.1:8000").intValue()); - assertEquals(8080, getPort("PLAINTEXT://mydomain.com:8080").intValue()); + assertEquals(8080, getPort("mydomain.com:8080").intValue()); assertEquals(1234, getPort("PLAINTEXT://[::1]:1234").intValue()); - assertEquals(5678, getPort("PLAINTEXT://[2001:db8:85a3:8d3:1319:8a2e:370:7348]:5678").intValue()); + assertEquals(5678, getPort("[2001:db8:85a3:8d3:1319:8a2e:370:7348]:5678").intValue()); } @Test diff --git a/core/src/main/scala/kafka/cluster/BrokerEndPoint.scala b/core/src/main/scala/kafka/cluster/BrokerEndPoint.scala index 90ea612..66fc47b 100644 --- a/core/src/main/scala/kafka/cluster/BrokerEndPoint.scala +++ b/core/src/main/scala/kafka/cluster/BrokerEndPoint.scala @@ -3,18 +3,25 @@ package kafka.cluster import java.nio.ByteBuffer import kafka.api.ApiUtils._ +import kafka.common.KafkaException import org.apache.kafka.common.utils.Utils._ import kafka.cluster.ProtocolType._ object BrokerEndpoint { def createBrokerEndPoint(brokerId: Int, connectionString: String): BrokerEndpoint = { - val endPoint = EndPoint.createEndPoint(connectionString) - new BrokerEndpoint(brokerId,endPoint.host,endPoint.port.toInt) + val uriParseExp = """.*?\[?([0-9a-z\-.:]*)\]?:([0-9]+)""".r + + connectionString match { + case uriParseExp(host,port) => new BrokerEndpoint(brokerId,host,port.toInt) + case _ => throw new KafkaException("Unable to parse " + connectionString + " to a broker endpoint") + } } /** - * BrokerEndpoint includes the 
protocol type to allow locating the right endpoint in lists - * but we don't serialize or de-serialize it + * BrokerEndpoint is used to connect to specific host:port pair + * It is typically used by clients (or brokers when connecting to other brokers) + * and contains no information about the security protocol used on the connection + * clients should know which security protocol to use from configuration * this allows us to keep the wire protocol with the clients unchanged where the protocol is not needed * @param buffer * @return diff --git a/core/src/main/scala/kafka/cluster/EndPoint.scala b/core/src/main/scala/kafka/cluster/EndPoint.scala index 1b5fe8f..1e72927 100644 --- a/core/src/main/scala/kafka/cluster/EndPoint.scala +++ b/core/src/main/scala/kafka/cluster/EndPoint.scala @@ -33,7 +33,7 @@ object EndPoint { } def createEndPoint(connectionString: String): EndPoint = { - val uriParseExp = """^(.*)://([0-9a-z\-.]*):([0-9]+)""".r + val uriParseExp = """^(.*)://\[?([0-9a-z\-.:]*)\]?:([0-9]+)""".r connectionString match { case uriParseExp(protocol,"",port) => new EndPoint(null,port.toInt,ProtocolType.withName(protocol)) case uriParseExp(protocol,host,port) => new EndPoint(host,port.toInt,ProtocolType.withName(protocol)) @@ -42,6 +42,9 @@ object EndPoint { } } +/** + * Part of the broker definition - matching host/port pair to a protocol + */ case class EndPoint(host: String, port: Int, protocolType: ProtocolType) { override def toString: String = protocolType + "://" + host + ":" + port diff --git a/core/src/test/scala/unit/kafka/cluster/BrokerTest.scala b/core/src/test/scala/unit/kafka/cluster/BrokerTest.scala index 7d59037..c52c211 100644 --- a/core/src/test/scala/unit/kafka/cluster/BrokerTest.scala +++ b/core/src/test/scala/unit/kafka/cluster/BrokerTest.scala @@ -80,16 +80,34 @@ class BrokerTest extends JUnit3Suite with Logging { } @Test + def testBrokerEndpointFromURI() = { + var connectionString = "localhost:9092" + var endpoint = BrokerEndpoint.createBrokerEndPoint(1,connectionString) + assert(endpoint.host == "localhost") + assert(endpoint.port == 9092) + // also test for ipv6 + connectionString = "[::1]:9092" + endpoint = BrokerEndpoint.createBrokerEndPoint(1,connectionString) + assert(endpoint.host == "::1") + assert(endpoint.port == 9092) + } + + @Test def testEndpointFromURI() = { var connectionString = "PLAINTEXT://localhost:9092" - var endpoint = BrokerEndpoint.createBrokerEndPoint(1,connectionString) + var endpoint = EndPoint.createEndPoint(connectionString) assert(endpoint.host == "localhost") assert(endpoint.port == 9092) // also test for default bind connectionString = "PLAINTEXT://:9092" - endpoint = BrokerEndpoint.createBrokerEndPoint(1,connectionString) + endpoint = EndPoint.createEndPoint(connectionString) assert(endpoint.host == null) assert(endpoint.port == 9092) + // also test for ipv6 + connectionString = "PLAINTEXT://[::1]:9092" + endpoint = EndPoint.createEndPoint(connectionString) + assert(endpoint.host == "::1") + assert(endpoint.port == 9092) } diff --git a/system_test/replication_testsuite/testcase_1/testcase_1_properties.json b/system_test/replication_testsuite/testcase_1/testcase_1_properties.json index 0c6d7a3..680213f 100644 --- a/system_test/replication_testsuite/testcase_1/testcase_1_properties.json +++ b/system_test/replication_testsuite/testcase_1/testcase_1_properties.json @@ -32,7 +32,7 @@ "entity_id": "1", "port": "9091", "broker.id": "1", - "log.segment.bytes": "20480", + "log.segment.bytes": "10000000", "log.dir": "/tmp/kafka_server_1_logs", 
"log_filename": "kafka_server_9091.log", "config_filename": "kafka_server_9091.properties" @@ -41,7 +41,7 @@ "entity_id": "2", "port": "9092", "broker.id": "2", - "log.segment.bytes": "20480", + "log.segment.bytes": "10000000", "log.dir": "/tmp/kafka_server_2_logs", "log_filename": "kafka_server_9092.log", "config_filename": "kafka_server_9092.properties" @@ -50,7 +50,7 @@ "entity_id": "3", "port": "9093", "broker.id": "3", - "log.segment.bytes": "20480", + "log.segment.bytes": "10000000", "log.dir": "/tmp/kafka_server_3_logs", "log_filename": "kafka_server_9093.log", "config_filename": "kafka_server_9093.properties" -- 1.9.3 (Apple Git-50) From de48cd6fcbdadb805b16ab140da614d175b0d63f Mon Sep 17 00:00:00 2001 From: Gwen Shapira Date: Thu, 25 Dec 2014 10:15:16 -0800 Subject: [PATCH 7/9] added link to security wiki in doc --- .../kafka/clients/producer/ProducerConfig.java | 3 ++- core/src/main/scala/kafka/admin/AdminUtils.scala | 6 ++--- .../scala/kafka/api/ConsumerMetadataRequest.scala | 8 +++---- .../scala/kafka/api/ConsumerMetadataResponse.scala | 2 +- .../scala/kafka/api/TopicMetadataRequest.scala | 12 +++++----- .../scala/kafka/api/TopicMetadataResponse.scala | 2 +- core/src/main/scala/kafka/client/ClientUtils.scala | 8 +++---- core/src/main/scala/kafka/cluster/Broker.scala | 8 +++---- .../main/scala/kafka/cluster/BrokerEndPoint.scala | 2 +- core/src/main/scala/kafka/cluster/EndPoint.scala | 10 ++++----- .../main/scala/kafka/cluster/ProtocolType.scala | 26 ---------------------- .../scala/kafka/cluster/SecurityProtocol.scala | 26 ++++++++++++++++++++++ .../main/scala/kafka/consumer/ConsumerConfig.scala | 4 ++-- .../scala/kafka/javaapi/TopicMetadataRequest.scala | 12 +++++----- .../main/scala/kafka/network/SocketServer.scala | 2 +- .../main/scala/kafka/producer/ProducerConfig.scala | 4 ++-- core/src/main/scala/kafka/server/KafkaConfig.scala | 4 ++-- .../main/scala/kafka/server/MetadataCache.scala | 8 +++---- .../scala/kafka/tools/ConsumerOffsetChecker.scala | 4 ++-- .../main/scala/kafka/tools/GetOffsetShell.scala | 2 +- .../scala/kafka/tools/SimpleConsumerShell.scala | 2 +- .../main/scala/kafka/tools/UpdateOffsetsInZK.scala | 6 ++--- core/src/main/scala/kafka/utils/Utils.scala | 2 +- core/src/main/scala/kafka/utils/ZkUtils.scala | 6 ++--- .../test/scala/other/kafka/TestOffsetManager.scala | 6 ++--- .../scala/unit/kafka/admin/AddPartitionsTest.scala | 8 +++---- .../api/RequestResponseSerializationTest.scala | 16 ++++++------- .../test/scala/unit/kafka/cluster/BrokerTest.scala | 14 ++++++------ .../unit/kafka/consumer/ConsumerIteratorTest.scala | 2 +- .../unit/kafka/integration/TopicMetadataTest.scala | 4 ++-- .../unit/kafka/network/SocketServerTest.scala | 6 ++--- .../unit/kafka/producer/SyncProducerTest.scala | 18 +++++++-------- .../unit/kafka/server/AdvertiseBrokerTest.scala | 6 ++--- .../scala/unit/kafka/server/KafkaConfigTest.scala | 6 ++--- .../unit/kafka/server/LeaderElectionTest.scala | 2 +- .../test/scala/unit/kafka/utils/TestUtils.scala | 4 ++-- 36 files changed, 131 insertions(+), 130 deletions(-) delete mode 100644 core/src/main/scala/kafka/cluster/ProtocolType.scala create mode 100644 core/src/main/scala/kafka/cluster/SecurityProtocol.scala diff --git a/clients/src/main/java/org/apache/kafka/clients/producer/ProducerConfig.java b/clients/src/main/java/org/apache/kafka/clients/producer/ProducerConfig.java index 03d2fee..c2567fd 100644 --- a/clients/src/main/java/org/apache/kafka/clients/producer/ProducerConfig.java +++ 
b/clients/src/main/java/org/apache/kafka/clients/producer/ProducerConfig.java @@ -183,7 +183,8 @@ public class ProducerConfig extends AbstractConfig { /** security.protocol */ public static final String SECURITY_PROTOCOL = "security.protocol"; - private static final String SECURITY_PROTOCOL_DOC = "Protocol used to communicate with brokers. Currently only PLAINTEXT is supported. SSL and Kerberos are planned for the near future"; + private static final String SECURITY_PROTOCOL_DOC = "Protocol used to communicate with brokers. Currently only PLAINTEXT is supported (and is the default). SSL and Kerberos are planned for the near future. " + + " Details on Kafka's security plans are available on our wiki: https://cwiki.apache.org/confluence/display/KAFKA/Security"; static { config = new ConfigDef().define(BOOTSTRAP_SERVERS_CONFIG, Type.LIST, Importance.HIGH, BOOSTRAP_SERVERS_DOC) diff --git a/core/src/main/scala/kafka/admin/AdminUtils.scala b/core/src/main/scala/kafka/admin/AdminUtils.scala index 7e57b8c..1f18872 100644 --- a/core/src/main/scala/kafka/admin/AdminUtils.scala +++ b/core/src/main/scala/kafka/admin/AdminUtils.scala @@ -21,8 +21,8 @@ import kafka.common._ import java.util.Random import java.util.Properties import kafka.api.{TopicMetadata, PartitionMetadata} -import kafka.cluster.ProtocolType.ProtocolType -import kafka.cluster.{BrokerEndpoint, Broker, ProtocolType} +import kafka.cluster.SecurityProtocol.SecurityProtocol +import kafka.cluster.{BrokerEndpoint, Broker, SecurityProtocol} import kafka.log.LogConfig import kafka.utils.{Logging, ZkUtils, Json} @@ -294,7 +294,7 @@ object AdminUtils extends Logging { - private def fetchTopicMetadataFromZk(topic: String, zkClient: ZkClient, cachedBrokerInfo: mutable.HashMap[Int, Broker], protocol: ProtocolType = ProtocolType.PLAINTEXT): TopicMetadata = { + private def fetchTopicMetadataFromZk(topic: String, zkClient: ZkClient, cachedBrokerInfo: mutable.HashMap[Int, Broker], protocol: SecurityProtocol = SecurityProtocol.PLAINTEXT): TopicMetadata = { if(ZkUtils.pathExists(zkClient, ZkUtils.getTopicPath(topic))) { val topicPartitionAssignment = ZkUtils.getPartitionAssignmentForTopics(zkClient, List(topic)).get(topic).get val sortedPartitions = topicPartitionAssignment.toList.sortWith((m1, m2) => m1._1 < m2._1) diff --git a/core/src/main/scala/kafka/api/ConsumerMetadataRequest.scala b/core/src/main/scala/kafka/api/ConsumerMetadataRequest.scala index 6579b60..78ac228 100644 --- a/core/src/main/scala/kafka/api/ConsumerMetadataRequest.scala +++ b/core/src/main/scala/kafka/api/ConsumerMetadataRequest.scala @@ -18,8 +18,8 @@ package kafka.api import java.nio.ByteBuffer -import kafka.cluster.ProtocolType -import kafka.cluster.ProtocolType.ProtocolType +import kafka.cluster.SecurityProtocol +import kafka.cluster.SecurityProtocol.SecurityProtocol import kafka.network.{BoundedByteBufferSend, RequestChannel} import kafka.network.RequestChannel.Response import kafka.common.ErrorMapping @@ -35,7 +35,7 @@ object ConsumerMetadataRequest { val clientId = ApiUtils.readShortString(buffer) // request val group = ApiUtils.readShortString(buffer) - val securityProtocol = ProtocolType.withName(ApiUtils.readShortString(buffer)) + val securityProtocol = SecurityProtocol.withName(ApiUtils.readShortString(buffer)) ConsumerMetadataRequest(group, versionId, correlationId, clientId, securityProtocol) } @@ -45,7 +45,7 @@ case class ConsumerMetadataRequest(group: String, versionId: Short = ConsumerMetadataRequest.CurrentVersion, correlationId: Int = 0, clientId: String = 
ConsumerMetadataRequest.DefaultClientId, - securityProtocol: ProtocolType = ProtocolType.PLAINTEXT) + securityProtocol: SecurityProtocol = SecurityProtocol.PLAINTEXT) extends RequestOrResponse(Some(RequestKeys.ConsumerMetadataKey)) { def sizeInBytes = diff --git a/core/src/main/scala/kafka/api/ConsumerMetadataResponse.scala b/core/src/main/scala/kafka/api/ConsumerMetadataResponse.scala index 05b4c9a..f9a9101 100644 --- a/core/src/main/scala/kafka/api/ConsumerMetadataResponse.scala +++ b/core/src/main/scala/kafka/api/ConsumerMetadataResponse.scala @@ -18,7 +18,7 @@ package kafka.api import java.nio.ByteBuffer -import kafka.cluster.{ProtocolType, BrokerEndpoint, Broker} +import kafka.cluster.{SecurityProtocol, BrokerEndpoint, Broker} import kafka.common.ErrorMapping object ConsumerMetadataResponse { diff --git a/core/src/main/scala/kafka/api/TopicMetadataRequest.scala b/core/src/main/scala/kafka/api/TopicMetadataRequest.scala index 4f1ad1a..423eda6 100644 --- a/core/src/main/scala/kafka/api/TopicMetadataRequest.scala +++ b/core/src/main/scala/kafka/api/TopicMetadataRequest.scala @@ -19,9 +19,9 @@ package kafka.api import java.nio.ByteBuffer import kafka.api.ApiUtils._ -import kafka.cluster.ProtocolType -import kafka.cluster.ProtocolType.ProtocolType -import kafka.cluster.ProtocolType.ProtocolType +import kafka.cluster.SecurityProtocol +import kafka.cluster.SecurityProtocol.SecurityProtocol +import kafka.cluster.SecurityProtocol.SecurityProtocol import collection.mutable.ListBuffer import kafka.network.{BoundedByteBufferSend, RequestChannel} import kafka.common.ErrorMapping @@ -57,19 +57,19 @@ object TopicMetadataRequest extends Logging { if (versionId == 1) { securityProtocol = readShortString(buffer) } - new TopicMetadataRequest(versionId, correlationId, clientId, ProtocolType.withName(securityProtocol), topics.toList) + new TopicMetadataRequest(versionId, correlationId, clientId, SecurityProtocol.withName(securityProtocol), topics.toList) } } case class TopicMetadataRequest(val versionId: Short, val correlationId: Int, val clientId: String, - val securityProtocol: ProtocolType, + val securityProtocol: SecurityProtocol, val topics: Seq[String]) extends RequestOrResponse(Some(RequestKeys.MetadataKey)){ def this(topics: Seq[String], correlationId: Int) = - this(TopicMetadataRequest.CurrentVersion, correlationId, TopicMetadataRequest.DefaultClientId, ProtocolType.PLAINTEXT, topics) + this(TopicMetadataRequest.CurrentVersion, correlationId, TopicMetadataRequest.DefaultClientId, SecurityProtocol.PLAINTEXT, topics) def writeTo(buffer: ByteBuffer) { buffer.putShort(versionId) diff --git a/core/src/main/scala/kafka/api/TopicMetadataResponse.scala b/core/src/main/scala/kafka/api/TopicMetadataResponse.scala index 70450be..c55f60c 100644 --- a/core/src/main/scala/kafka/api/TopicMetadataResponse.scala +++ b/core/src/main/scala/kafka/api/TopicMetadataResponse.scala @@ -17,7 +17,7 @@ package kafka.api -import kafka.cluster.{ProtocolType, BrokerEndpoint, Broker} +import kafka.cluster.{SecurityProtocol, BrokerEndpoint, Broker} import java.nio.ByteBuffer object TopicMetadataResponse { diff --git a/core/src/main/scala/kafka/client/ClientUtils.scala b/core/src/main/scala/kafka/client/ClientUtils.scala index 2d53350..01618c5 100644 --- a/core/src/main/scala/kafka/client/ClientUtils.scala +++ b/core/src/main/scala/kafka/client/ClientUtils.scala @@ -16,7 +16,7 @@ */ package kafka.client - import kafka.cluster.ProtocolType.ProtocolType + import kafka.cluster.SecurityProtocol.SecurityProtocol import 
scala.collection._ import kafka.cluster._ @@ -98,7 +98,7 @@ object ClientUtils extends Logging{ /** * Parse a list of broker urls in the form host1:port1, host2:port2, ... */ - def parseBrokerList(brokerListStr: String, protocolType: ProtocolType = ProtocolType.PLAINTEXT): Seq[BrokerEndpoint] = { + def parseBrokerList(brokerListStr: String, protocolType: SecurityProtocol = SecurityProtocol.PLAINTEXT): Seq[BrokerEndpoint] = { val brokersStr = Utils.parseCsvList(brokerListStr) brokersStr.zipWithIndex.map { case (address, brokerId) => @@ -109,7 +109,7 @@ object ClientUtils extends Logging{ /** * Creates a blocking channel to a random broker */ - def channelToAnyBroker(zkClient: ZkClient, protocolType: ProtocolType, socketTimeoutMs: Int = 3000) : BlockingChannel = { + def channelToAnyBroker(zkClient: ZkClient, protocolType: SecurityProtocol, socketTimeoutMs: Int = 3000) : BlockingChannel = { var channel: BlockingChannel = null var connected = false while (!connected) { @@ -138,7 +138,7 @@ object ClientUtils extends Logging{ /** * Creates a blocking channel to the offset manager of the given group */ - def channelToOffsetManager(group: String, zkClient: ZkClient, socketTimeoutMs: Int = 3000, retryBackOffMs: Int = 1000, protocolType: ProtocolType = ProtocolType.PLAINTEXT) = { + def channelToOffsetManager(group: String, zkClient: ZkClient, socketTimeoutMs: Int = 3000, retryBackOffMs: Int = 1000, protocolType: SecurityProtocol = SecurityProtocol.PLAINTEXT) = { var queryChannel = channelToAnyBroker(zkClient, protocolType) var offsetManagerChannelOpt: Option[BlockingChannel] = None diff --git a/core/src/main/scala/kafka/cluster/Broker.scala b/core/src/main/scala/kafka/cluster/Broker.scala index a84a34e..f81acfa 100644 --- a/core/src/main/scala/kafka/cluster/Broker.scala +++ b/core/src/main/scala/kafka/cluster/Broker.scala @@ -21,7 +21,7 @@ import kafka.utils.Utils._ import kafka.utils.{Utils, Json} import java.nio.ByteBuffer import kafka.common.{BrokerEndPointNotAvailableException, KafkaException, BrokerNotAvailableException} -import kafka.cluster.ProtocolType._ +import kafka.cluster.SecurityProtocol._ /** * A Kafka broker @@ -93,7 +93,7 @@ case class Broker(id: Int, endPoints: Seq[EndPoint]) { override def toString: String = id + " : " + endPoints.mkString("(",",",")") def this(id: Int, host: String, port: Int) = { - this(id,List(EndPoint(host,port,ProtocolType.PLAINTEXT))) + this(id,List(EndPoint(host,port,SecurityProtocol.PLAINTEXT))) } @@ -110,11 +110,11 @@ case class Broker(id: Int, endPoints: Seq[EndPoint]) { 4 + /* number of endPoints */ endPoints.map(_.sizeInBytes).sum /* end points */ - def supportsChannel(protocolType: ProtocolType): Unit = { + def supportsChannel(protocolType: SecurityProtocol): Unit = { endPoints.map((endpoint)=>(endpoint.protocolType)).contains(protocolType) } - def getBrokerEndPoint(protocolType: ProtocolType): BrokerEndpoint = { + def getBrokerEndPoint(protocolType: SecurityProtocol): BrokerEndpoint = { val endpoint = endPoints.map((endpoint)=>(endpoint.protocolType,endpoint)).toMap.get(protocolType) endpoint match { case Some(endpoint) => new BrokerEndpoint(id,endpoint.host,endpoint.port) diff --git a/core/src/main/scala/kafka/cluster/BrokerEndPoint.scala b/core/src/main/scala/kafka/cluster/BrokerEndPoint.scala index 66fc47b..7e6825a 100644 --- a/core/src/main/scala/kafka/cluster/BrokerEndPoint.scala +++ b/core/src/main/scala/kafka/cluster/BrokerEndPoint.scala @@ -5,7 +5,7 @@ import java.nio.ByteBuffer import kafka.api.ApiUtils._ import kafka.common.KafkaException 
import org.apache.kafka.common.utils.Utils._ -import kafka.cluster.ProtocolType._ +import kafka.cluster.SecurityProtocol._ object BrokerEndpoint { def createBrokerEndPoint(brokerId: Int, connectionString: String): BrokerEndpoint = { diff --git a/core/src/main/scala/kafka/cluster/EndPoint.scala b/core/src/main/scala/kafka/cluster/EndPoint.scala index 1e72927..364b2f7 100644 --- a/core/src/main/scala/kafka/cluster/EndPoint.scala +++ b/core/src/main/scala/kafka/cluster/EndPoint.scala @@ -21,7 +21,7 @@ import java.nio.ByteBuffer import kafka.api.ApiUtils._ import kafka.common.KafkaException -import kafka.cluster.ProtocolType._ +import kafka.cluster.SecurityProtocol._ object EndPoint { @@ -29,14 +29,14 @@ object EndPoint { val port = buffer.getInt() val host = readShortString(buffer) val channel = readShortString(buffer) - EndPoint(host,port,ProtocolType.withName(channel)) + EndPoint(host,port,SecurityProtocol.withName(channel)) } def createEndPoint(connectionString: String): EndPoint = { val uriParseExp = """^(.*)://\[?([0-9a-z\-.:]*)\]?:([0-9]+)""".r connectionString match { - case uriParseExp(protocol,"",port) => new EndPoint(null,port.toInt,ProtocolType.withName(protocol)) - case uriParseExp(protocol,host,port) => new EndPoint(host,port.toInt,ProtocolType.withName(protocol)) + case uriParseExp(protocol,"",port) => new EndPoint(null,port.toInt,SecurityProtocol.withName(protocol)) + case uriParseExp(protocol,host,port) => new EndPoint(host,port.toInt,SecurityProtocol.withName(protocol)) case _ => throw new KafkaException("Unable to parse " + connectionString + " to a broker endpoint") } } @@ -45,7 +45,7 @@ object EndPoint { /** * Part of the broker definition - matching host/port pair to a protocol */ -case class EndPoint(host: String, port: Int, protocolType: ProtocolType) { +case class EndPoint(host: String, port: Int, protocolType: SecurityProtocol) { override def toString: String = protocolType + "://" + host + ":" + port diff --git a/core/src/main/scala/kafka/cluster/ProtocolType.scala b/core/src/main/scala/kafka/cluster/ProtocolType.scala deleted file mode 100644 index f4e6bc3..0000000 --- a/core/src/main/scala/kafka/cluster/ProtocolType.scala +++ /dev/null @@ -1,26 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package kafka.cluster - - -object ProtocolType extends Enumeration { - - type ProtocolType = Value - val PLAINTEXT = Value -} - diff --git a/core/src/main/scala/kafka/cluster/SecurityProtocol.scala b/core/src/main/scala/kafka/cluster/SecurityProtocol.scala new file mode 100644 index 0000000..82b4631 --- /dev/null +++ b/core/src/main/scala/kafka/cluster/SecurityProtocol.scala @@ -0,0 +1,26 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package kafka.cluster + + +object SecurityProtocol extends Enumeration { + + type SecurityProtocol = Value + val PLAINTEXT = Value +} + diff --git a/core/src/main/scala/kafka/consumer/ConsumerConfig.scala b/core/src/main/scala/kafka/consumer/ConsumerConfig.scala index fe89a30..054bd62 100644 --- a/core/src/main/scala/kafka/consumer/ConsumerConfig.scala +++ b/core/src/main/scala/kafka/consumer/ConsumerConfig.scala @@ -19,7 +19,7 @@ package kafka.consumer import java.util.Properties import kafka.api.OffsetRequest -import kafka.cluster.ProtocolType +import kafka.cluster.SecurityProtocol import kafka.utils._ import kafka.common.{InvalidConfigException, Config} @@ -184,7 +184,7 @@ class ConsumerConfig private (val props: VerifiableProperties) extends ZKConfig( val partitionAssignmentStrategy = props.getString("partition.assignment.strategy", DefaultPartitionAssignmentStrategy) /* plaintext or SSL */ - val securityProtocol = ProtocolType.withName(props.getString("security.protocol", "PLAINTEXT")) + val securityProtocol = SecurityProtocol.withName(props.getString("security.protocol", "PLAINTEXT")) validate(this) } diff --git a/core/src/main/scala/kafka/javaapi/TopicMetadataRequest.scala b/core/src/main/scala/kafka/javaapi/TopicMetadataRequest.scala index 3ea12c9..de33b04 100644 --- a/core/src/main/scala/kafka/javaapi/TopicMetadataRequest.scala +++ b/core/src/main/scala/kafka/javaapi/TopicMetadataRequest.scala @@ -18,9 +18,9 @@ package kafka.javaapi import kafka.api._ import java.nio.ByteBuffer -import kafka.cluster.ProtocolType -import kafka.cluster.ProtocolType.ProtocolType -import kafka.cluster.ProtocolType.ProtocolType +import kafka.cluster.SecurityProtocol +import kafka.cluster.SecurityProtocol.SecurityProtocol +import kafka.cluster.SecurityProtocol.SecurityProtocol import scala.collection.mutable import kafka.network.{BoundedByteBufferSend, RequestChannel} @@ -30,7 +30,7 @@ import kafka.network.RequestChannel.Response class TopicMetadataRequest(val versionId: Short, val correlationId: Int, val clientId: String, - val securityProtocol: ProtocolType, + val securityProtocol: SecurityProtocol, val topics: java.util.List[String]) extends RequestOrResponse(Some(kafka.api.RequestKeys.MetadataKey)) { @@ -40,10 +40,10 @@ class TopicMetadataRequest(val versionId: Short, } def this(topics: java.util.List[String]) = - this(kafka.api.TopicMetadataRequest.CurrentVersion, 0, kafka.api.TopicMetadataRequest.DefaultClientId, ProtocolType.PLAINTEXT, topics) + this(kafka.api.TopicMetadataRequest.CurrentVersion, 0, kafka.api.TopicMetadataRequest.DefaultClientId, SecurityProtocol.PLAINTEXT, topics) def this(topics: java.util.List[String], correlationId: Int) = - this(kafka.api.TopicMetadataRequest.CurrentVersion, correlationId, kafka.api.TopicMetadataRequest.DefaultClientId, ProtocolType.PLAINTEXT, topics) + 
this(kafka.api.TopicMetadataRequest.CurrentVersion, correlationId, kafka.api.TopicMetadataRequest.DefaultClientId, SecurityProtocol.PLAINTEXT, topics) def writeTo(buffer: ByteBuffer) = underlying.writeTo(buffer) diff --git a/core/src/main/scala/kafka/network/SocketServer.scala b/core/src/main/scala/kafka/network/SocketServer.scala index 1161be0..0e37459 100644 --- a/core/src/main/scala/kafka/network/SocketServer.scala +++ b/core/src/main/scala/kafka/network/SocketServer.scala @@ -25,7 +25,7 @@ import java.io._ import java.nio.channels._ import kafka.cluster.EndPoint -import kafka.cluster.ProtocolType.ProtocolType +import kafka.cluster.SecurityProtocol.SecurityProtocol import scala.collection._ import scala.collection.JavaConversions._ diff --git a/core/src/main/scala/kafka/producer/ProducerConfig.scala b/core/src/main/scala/kafka/producer/ProducerConfig.scala index bbbf651..9aff401 100644 --- a/core/src/main/scala/kafka/producer/ProducerConfig.scala +++ b/core/src/main/scala/kafka/producer/ProducerConfig.scala @@ -19,7 +19,7 @@ package kafka.producer import async.AsyncProducerConfig import java.util.Properties -import kafka.cluster.ProtocolType +import kafka.cluster.SecurityProtocol import kafka.utils.{Utils, VerifiableProperties} import kafka.message.{CompressionCodec, NoCompressionCodec} import kafka.common.{InvalidConfigException, Config} @@ -115,7 +115,7 @@ class ProducerConfig private (val props: VerifiableProperties) val topicMetadataRefreshIntervalMs = props.getInt("topic.metadata.refresh.interval.ms", 600000) /* plaintext or SSL */ - val securityProtocol = ProtocolType.withName(props.getString("security.protocol", "PLAINTEXT")) + val securityProtocol = SecurityProtocol.withName(props.getString("security.protocol", "PLAINTEXT")) validate(this) } diff --git a/core/src/main/scala/kafka/server/KafkaConfig.scala b/core/src/main/scala/kafka/server/KafkaConfig.scala index 0f42303..8d1739e 100644 --- a/core/src/main/scala/kafka/server/KafkaConfig.scala +++ b/core/src/main/scala/kafka/server/KafkaConfig.scala @@ -18,7 +18,7 @@ package kafka.server import java.util.Properties -import kafka.cluster.ProtocolType +import kafka.cluster.SecurityProtocol import kafka.message.{MessageSet, Message} import kafka.consumer.ConsumerConfig import kafka.utils.{VerifiableProperties, ZKConfig, Utils} @@ -122,7 +122,7 @@ class KafkaConfig private (val props: VerifiableProperties) extends ZKConfig(pro val connectionsMaxIdleMs = props.getLong("connections.max.idle.ms", 10*60*1000L) /* security protocol used to communicate between brokers */ - val securityProtocol = ProtocolType.withName(props.getString("security.protocol","PLAINTEXT")) + val securityProtocol = SecurityProtocol.withName(props.getString("security.protocol","PLAINTEXT")) /*********** Log Configuration ***********/ diff --git a/core/src/main/scala/kafka/server/MetadataCache.scala b/core/src/main/scala/kafka/server/MetadataCache.scala index 3df176c..cb96edb 100644 --- a/core/src/main/scala/kafka/server/MetadataCache.scala +++ b/core/src/main/scala/kafka/server/MetadataCache.scala @@ -17,12 +17,12 @@ package kafka.server -import kafka.cluster.ProtocolType.ProtocolType -import kafka.cluster.ProtocolType.ProtocolType +import kafka.cluster.SecurityProtocol.SecurityProtocol +import kafka.cluster.SecurityProtocol.SecurityProtocol import scala.collection.{Seq, Set, mutable} import kafka.api._ -import kafka.cluster.{BrokerEndpoint, ProtocolType, Broker} +import kafka.cluster.{BrokerEndpoint, SecurityProtocol, Broker} import 
java.util.concurrent.locks.ReentrantReadWriteLock import kafka.utils.Utils._ import kafka.common.{ErrorMapping, ReplicaNotAvailableException, LeaderNotAvailableException} @@ -39,7 +39,7 @@ private[server] class MetadataCache { private var aliveBrokers: Map[Int, Broker] = Map() private val partitionMetadataLock = new ReentrantReadWriteLock() - def getTopicMetadata(topics: Set[String], protocol: ProtocolType = ProtocolType.PLAINTEXT) = { + def getTopicMetadata(topics: Set[String], protocol: SecurityProtocol = SecurityProtocol.PLAINTEXT) = { val isAllTopics = topics.isEmpty val topicsRequested = if(isAllTopics) cache.keySet else topics val topicResponses: mutable.ListBuffer[TopicMetadata] = new mutable.ListBuffer[TopicMetadata] diff --git a/core/src/main/scala/kafka/tools/ConsumerOffsetChecker.scala b/core/src/main/scala/kafka/tools/ConsumerOffsetChecker.scala index 7198322..03b121d 100644 --- a/core/src/main/scala/kafka/tools/ConsumerOffsetChecker.scala +++ b/core/src/main/scala/kafka/tools/ConsumerOffsetChecker.scala @@ -19,7 +19,7 @@ package kafka.tools import joptsimple._ -import kafka.cluster.ProtocolType +import kafka.cluster.SecurityProtocol import org.I0Itec.zkclient.ZkClient import kafka.utils._ import kafka.consumer.SimpleConsumer @@ -159,7 +159,7 @@ object ConsumerOffsetChecker extends Logging { topicPidMap = immutable.Map(ZkUtils.getPartitionsForTopics(zkClient, topicList).toSeq:_*) val topicPartitions = topicPidMap.flatMap { case(topic, partitionSeq) => partitionSeq.map(TopicAndPartition(topic, _)) }.toSeq - val channel = ClientUtils.channelToOffsetManager(group, zkClient, channelSocketTimeoutMs, channelRetryBackoffMs, ProtocolType.PLAINTEXT) + val channel = ClientUtils.channelToOffsetManager(group, zkClient, channelSocketTimeoutMs, channelRetryBackoffMs, SecurityProtocol.PLAINTEXT) debug("Sending offset fetch request to coordinator %s:%d.".format(channel.host, channel.port)) channel.send(OffsetFetchRequest(group, topicPartitions)) diff --git a/core/src/main/scala/kafka/tools/GetOffsetShell.scala b/core/src/main/scala/kafka/tools/GetOffsetShell.scala index 1596516..4f0b463 100644 --- a/core/src/main/scala/kafka/tools/GetOffsetShell.scala +++ b/core/src/main/scala/kafka/tools/GetOffsetShell.scala @@ -24,7 +24,7 @@ import kafka.api.{PartitionOffsetRequestInfo, OffsetRequest} import kafka.common.TopicAndPartition import kafka.client.ClientUtils import kafka.utils.{ToolsUtils, CommandLineUtils} -import kafka.cluster.ProtocolType +import kafka.cluster.SecurityProtocol object GetOffsetShell { diff --git a/core/src/main/scala/kafka/tools/SimpleConsumerShell.scala b/core/src/main/scala/kafka/tools/SimpleConsumerShell.scala index ac86762..84c09c2 100644 --- a/core/src/main/scala/kafka/tools/SimpleConsumerShell.scala +++ b/core/src/main/scala/kafka/tools/SimpleConsumerShell.scala @@ -22,7 +22,7 @@ import kafka.utils._ import kafka.consumer._ import kafka.client.ClientUtils import kafka.api.{OffsetRequest, FetchRequestBuilder, Request} -import kafka.cluster.{BrokerEndpoint, ProtocolType, Broker} +import kafka.cluster.{BrokerEndpoint, SecurityProtocol, Broker} import scala.collection.JavaConversions._ import kafka.common.TopicAndPartition diff --git a/core/src/main/scala/kafka/tools/UpdateOffsetsInZK.scala b/core/src/main/scala/kafka/tools/UpdateOffsetsInZK.scala index 38d3b48..c6c5a88 100644 --- a/core/src/main/scala/kafka/tools/UpdateOffsetsInZK.scala +++ b/core/src/main/scala/kafka/tools/UpdateOffsetsInZK.scala @@ -17,7 +17,7 @@ package kafka.tools -import kafka.cluster.ProtocolType 
+import kafka.cluster.SecurityProtocol import org.I0Itec.zkclient.ZkClient import kafka.consumer.{SimpleConsumer, ConsumerConfig} import kafka.api.{PartitionOffsetRequestInfo, OffsetRequest} @@ -66,8 +66,8 @@ object UpdateOffsetsInZK { ZkUtils.getBrokerInfo(zkClient, broker) match { case Some(brokerInfo) => - val consumer = new SimpleConsumer(brokerInfo.getBrokerEndPoint(ProtocolType.PLAINTEXT).host, - brokerInfo.getBrokerEndPoint(ProtocolType.PLAINTEXT).port, + val consumer = new SimpleConsumer(brokerInfo.getBrokerEndPoint(SecurityProtocol.PLAINTEXT).host, + brokerInfo.getBrokerEndPoint(SecurityProtocol.PLAINTEXT).port, 10000, 100 * 1024, "UpdateOffsetsInZk") val topicAndPartition = TopicAndPartition(topic, partition) val request = OffsetRequest(Map(topicAndPartition -> PartitionOffsetRequestInfo(offsetOption, 1))) diff --git a/core/src/main/scala/kafka/utils/Utils.scala b/core/src/main/scala/kafka/utils/Utils.scala index b823c1e..0a96c0f 100644 --- a/core/src/main/scala/kafka/utils/Utils.scala +++ b/core/src/main/scala/kafka/utils/Utils.scala @@ -31,7 +31,7 @@ import java.util.Properties import kafka.common.KafkaException import kafka.common.KafkaStorageException import kafka.cluster.EndPoint -import kafka.cluster.ProtocolType +import kafka.cluster.SecurityProtocol /** diff --git a/core/src/main/scala/kafka/utils/ZkUtils.scala b/core/src/main/scala/kafka/utils/ZkUtils.scala index aed294f..9f700f1 100644 --- a/core/src/main/scala/kafka/utils/ZkUtils.scala +++ b/core/src/main/scala/kafka/utils/ZkUtils.scala @@ -17,7 +17,7 @@ package kafka.utils -import kafka.cluster.ProtocolType.ProtocolType +import kafka.cluster.SecurityProtocol.SecurityProtocol import kafka.cluster._ import kafka.consumer.{ConsumerThreadId, TopicCount} import org.I0Itec.zkclient.ZkClient @@ -83,7 +83,7 @@ object ZkUtils extends Logging { brokerIds.map(_.toInt).map(getBrokerInfo(zkClient, _)).filter(_.isDefined).map(_.get) } - def getAllBrokerEndPointsForChannel(zkClient: ZkClient, protocolType: ProtocolType): Seq[BrokerEndpoint] = { + def getAllBrokerEndPointsForChannel(zkClient: ZkClient, protocolType: SecurityProtocol): Seq[BrokerEndpoint] = { getAllBrokersInCluster(zkClient).map(_.getBrokerEndPoint(protocolType)) } @@ -166,7 +166,7 @@ object ZkUtils extends Logging { def registerBrokerInZk(zkClient: ZkClient, id: Int, advertisedEndpoints: Seq[EndPoint], timeout: Int, jmxPort: Int) { val brokerIdPath = ZkUtils.BrokerIdsPath + "/" + id val timestamp = SystemTime.milliseconds.toString - val defaultEndPoint = advertisedEndpoints.find(_.protocolType == ProtocolType.PLAINTEXT).get + val defaultEndPoint = advertisedEndpoints.find(_.protocolType == SecurityProtocol.PLAINTEXT).get val host = defaultEndPoint.host val port = defaultEndPoint.port diff --git a/core/src/test/scala/other/kafka/TestOffsetManager.scala b/core/src/test/scala/other/kafka/TestOffsetManager.scala index 67301ed..28f70b4 100644 --- a/core/src/test/scala/other/kafka/TestOffsetManager.scala +++ b/core/src/test/scala/other/kafka/TestOffsetManager.scala @@ -1,6 +1,6 @@ package other.kafka -import kafka.cluster.ProtocolType +import kafka.cluster.SecurityProtocol import org.I0Itec.zkclient.ZkClient import kafka.api._ import kafka.utils.{ShutdownableThread, ZKStringSerializer} @@ -111,7 +111,7 @@ object TestOffsetManager { private val fetchTimer = new KafkaTimer(timer) private val channels = mutable.Map[Int, BlockingChannel]() - private var metadataChannel = ClientUtils.channelToAnyBroker(zkClient, ProtocolType.PLAINTEXT, SocketTimeoutMs) + private var 
metadataChannel = ClientUtils.channelToAnyBroker(zkClient, SecurityProtocol.PLAINTEXT, SocketTimeoutMs) private val numErrors = new AtomicInteger(0) @@ -157,7 +157,7 @@ object TestOffsetManager { println("Error while querying %s:%d - shutting down query channel.".format(metadataChannel.host, metadataChannel.port)) metadataChannel.disconnect() println("Creating new query channel.") - metadataChannel = ClientUtils.channelToAnyBroker(zkClient, ProtocolType.PLAINTEXT, SocketTimeoutMs) + metadataChannel = ClientUtils.channelToAnyBroker(zkClient, SecurityProtocol.PLAINTEXT, SocketTimeoutMs) } finally { Thread.sleep(fetchIntervalMs) diff --git a/core/src/test/scala/unit/kafka/admin/AddPartitionsTest.scala b/core/src/test/scala/unit/kafka/admin/AddPartitionsTest.scala index 2f338b7..7118b40 100644 --- a/core/src/test/scala/unit/kafka/admin/AddPartitionsTest.scala +++ b/core/src/test/scala/unit/kafka/admin/AddPartitionsTest.scala @@ -22,7 +22,7 @@ import kafka.zk.ZooKeeperTestHarness import kafka.utils.TestUtils._ import junit.framework.Assert._ import kafka.utils.{ZkUtils, Utils, TestUtils} -import kafka.cluster.{ProtocolType, EndPoint, Broker} +import kafka.cluster.{SecurityProtocol, EndPoint, Broker} import kafka.client.ClientUtils import kafka.server.{KafkaConfig, KafkaServer} @@ -109,7 +109,7 @@ class AddPartitionsTest extends JUnit3Suite with ZooKeeperTestHarness { // read metadata from a broker and verify the new topic partitions exist TestUtils.waitUntilMetadataIsPropagated(servers, topic1, 1) TestUtils.waitUntilMetadataIsPropagated(servers, topic1, 2) - val metadata = ClientUtils.fetchTopicMetadata(Set(topic1), brokers.map(_.getBrokerEndPoint(ProtocolType.PLAINTEXT)), "AddPartitionsTest-testIncrementPartitions", + val metadata = ClientUtils.fetchTopicMetadata(Set(topic1), brokers.map(_.getBrokerEndPoint(SecurityProtocol.PLAINTEXT)), "AddPartitionsTest-testIncrementPartitions", 2000,0).topicsMetadata val metaDataForTopic1 = metadata.filter(p => p.topic.equals(topic1)) val partitionDataForTopic1 = metaDataForTopic1.head.partitionsMetadata @@ -134,7 +134,7 @@ class AddPartitionsTest extends JUnit3Suite with ZooKeeperTestHarness { // read metadata from a broker and verify the new topic partitions exist TestUtils.waitUntilMetadataIsPropagated(servers, topic2, 1) TestUtils.waitUntilMetadataIsPropagated(servers, topic2, 2) - val metadata = ClientUtils.fetchTopicMetadata(Set(topic2), brokers.map(_.getBrokerEndPoint(ProtocolType.PLAINTEXT)), "AddPartitionsTest-testManualAssignmentOfReplicas", + val metadata = ClientUtils.fetchTopicMetadata(Set(topic2), brokers.map(_.getBrokerEndPoint(SecurityProtocol.PLAINTEXT)), "AddPartitionsTest-testManualAssignmentOfReplicas", 2000,0).topicsMetadata val metaDataForTopic2 = metadata.filter(p => p.topic.equals(topic2)) val partitionDataForTopic2 = metaDataForTopic2.head.partitionsMetadata @@ -158,7 +158,7 @@ class AddPartitionsTest extends JUnit3Suite with ZooKeeperTestHarness { TestUtils.waitUntilMetadataIsPropagated(servers, topic3, 5) TestUtils.waitUntilMetadataIsPropagated(servers, topic3, 6) - val metadata = ClientUtils.fetchTopicMetadata(Set(topic3), brokers.map(_.getBrokerEndPoint(ProtocolType.PLAINTEXT)), "AddPartitionsTest-testReplicaPlacement", + val metadata = ClientUtils.fetchTopicMetadata(Set(topic3), brokers.map(_.getBrokerEndPoint(SecurityProtocol.PLAINTEXT)), "AddPartitionsTest-testReplicaPlacement", 2000,0).topicsMetadata val metaDataForTopic3 = metadata.filter(p => p.topic.equals(topic3)).head diff --git 
a/core/src/test/scala/unit/kafka/api/RequestResponseSerializationTest.scala b/core/src/test/scala/unit/kafka/api/RequestResponseSerializationTest.scala index ea70e2c..595ac5c 100644 --- a/core/src/test/scala/unit/kafka/api/RequestResponseSerializationTest.scala +++ b/core/src/test/scala/unit/kafka/api/RequestResponseSerializationTest.scala @@ -22,7 +22,7 @@ import org.scalatest.junit.JUnitSuite import junit.framework.Assert._ import java.nio.ByteBuffer import kafka.message.{Message, ByteBufferMessageSet} -import kafka.cluster.{BrokerEndpoint, ProtocolType, EndPoint, Broker} +import kafka.cluster.{BrokerEndpoint, SecurityProtocol, EndPoint, Broker} import kafka.common.{OffsetAndMetadata, ErrorMapping, OffsetMetadataAndError} import kafka.utils.SystemTime import org.apache.kafka.common.requests._ @@ -80,10 +80,10 @@ object SerializationTestUtils { TopicAndPartition(topic2, 3) -> PartitionFetchInfo(4000, 100) ) - private val brokers = List(new Broker(0, List(EndPoint("localhost", 1011, ProtocolType.PLAINTEXT))), - new Broker(1, List(EndPoint("localhost", 1012, ProtocolType.PLAINTEXT))), - new Broker(2, List(EndPoint("localhost", 1013, ProtocolType.PLAINTEXT)))) - private val brokerEndpoints = brokers.map(_.getBrokerEndPoint(ProtocolType.PLAINTEXT)) + private val brokers = List(new Broker(0, List(EndPoint("localhost", 1011, SecurityProtocol.PLAINTEXT))), + new Broker(1, List(EndPoint("localhost", 1012, SecurityProtocol.PLAINTEXT))), + new Broker(2, List(EndPoint("localhost", 1013, SecurityProtocol.PLAINTEXT)))) + private val brokerEndpoints = brokers.map(_.getBrokerEndPoint(SecurityProtocol.PLAINTEXT)) private val partitionMetaData0 = new PartitionMetadata(0, Some(brokerEndpoints.head), replicas = brokerEndpoints, isr = brokerEndpoints, errorCode = 0) private val partitionMetaData1 = new PartitionMetadata(1, Some(brokerEndpoints.head), replicas = brokerEndpoints, isr = brokerEndpoints.tail, errorCode = 1) @@ -148,11 +148,11 @@ object SerializationTestUtils { } def createTestTopicMetadataRequest: TopicMetadataRequest = { - new TopicMetadataRequest(1, 1, "client 1", ProtocolType.PLAINTEXT, Seq(topic1, topic2)) + new TopicMetadataRequest(1, 1, "client 1", SecurityProtocol.PLAINTEXT, Seq(topic1, topic2)) } def createTestTopicMetadataResponse: TopicMetadataResponse = { - new TopicMetadataResponse(brokers.map(_.getBrokerEndPoint(ProtocolType.PLAINTEXT)).toVector, Seq(topicmetaData1, topicmetaData2), 1) + new TopicMetadataResponse(brokers.map(_.getBrokerEndPoint(SecurityProtocol.PLAINTEXT)).toVector, Seq(topicmetaData1, topicmetaData2), 1) } def createTestOffsetCommitRequestV1: OffsetCommitRequest = { @@ -196,7 +196,7 @@ object SerializationTestUtils { } def createConsumerMetadataResponse: ConsumerMetadataResponse = { - ConsumerMetadataResponse(Some(brokers.head.getBrokerEndPoint(ProtocolType.PLAINTEXT)), ErrorMapping.NoError) + ConsumerMetadataResponse(Some(brokers.head.getBrokerEndPoint(SecurityProtocol.PLAINTEXT)), ErrorMapping.NoError) } def createHeartbeatRequestAndHeader: HeartbeatRequestAndHeader = { diff --git a/core/src/test/scala/unit/kafka/cluster/BrokerTest.scala b/core/src/test/scala/unit/kafka/cluster/BrokerTest.scala index c52c211..7a30f3f 100644 --- a/core/src/test/scala/unit/kafka/cluster/BrokerTest.scala +++ b/core/src/test/scala/unit/kafka/cluster/BrokerTest.scala @@ -30,7 +30,7 @@ class BrokerTest extends JUnit3Suite with Logging { @Test def testSerDe() = { - val endpoint = new EndPoint("myhost",9092,ProtocolType.PLAINTEXT) + val endpoint = new 
EndPoint("myhost",9092,SecurityProtocol.PLAINTEXT) val listEndPoints = List(endpoint) val origBroker = new Broker(1,listEndPoints) val brokerBytes = ByteBuffer.allocate(origBroker.sizeInBytes) @@ -43,10 +43,10 @@ class BrokerTest extends JUnit3Suite with Logging { @Test def testHashAndEquals() = { - val endpoint1 = new EndPoint("myhost",9092,ProtocolType.PLAINTEXT) - val endpoint2 = new EndPoint("myhost",9092,ProtocolType.PLAINTEXT) - val endpoint3 = new EndPoint("myhost",1111,ProtocolType.PLAINTEXT) - val endpoint4 = new EndPoint("other",1111,ProtocolType.PLAINTEXT) + val endpoint1 = new EndPoint("myhost",9092,SecurityProtocol.PLAINTEXT) + val endpoint2 = new EndPoint("myhost",9092,SecurityProtocol.PLAINTEXT) + val endpoint3 = new EndPoint("myhost",1111,SecurityProtocol.PLAINTEXT) + val endpoint4 = new EndPoint("other",1111,SecurityProtocol.PLAINTEXT) val broker1 = new Broker(1,List(endpoint1)) val broker2 = new Broker(1,List(endpoint2)) val broker3 = new Broker(2,List(endpoint3)) @@ -75,8 +75,8 @@ class BrokerTest extends JUnit3Suite with Logging { "\"endpoints\":\"PLAINTEXT://localhost:9092\"}" val broker = Broker.createBroker(1, brokerInfoStr) assert(broker.id == 1) - assert(broker.getBrokerEndPoint(ProtocolType.PLAINTEXT).host == "localhost") - assert(broker.getBrokerEndPoint(ProtocolType.PLAINTEXT).port == 9092) + assert(broker.getBrokerEndPoint(SecurityProtocol.PLAINTEXT).host == "localhost") + assert(broker.getBrokerEndPoint(SecurityProtocol.PLAINTEXT).port == 9092) } @Test diff --git a/core/src/test/scala/unit/kafka/consumer/ConsumerIteratorTest.scala b/core/src/test/scala/unit/kafka/consumer/ConsumerIteratorTest.scala index ec31b34..e48dc0b 100644 --- a/core/src/test/scala/unit/kafka/consumer/ConsumerIteratorTest.scala +++ b/core/src/test/scala/unit/kafka/consumer/ConsumerIteratorTest.scala @@ -29,7 +29,7 @@ import kafka.utils.TestUtils._ import kafka.utils._ import org.junit.Test import kafka.serializer._ -import kafka.cluster.{ProtocolType, EndPoint, Broker, Cluster} +import kafka.cluster.{SecurityProtocol, EndPoint, Broker, Cluster} import org.scalatest.junit.JUnit3Suite import kafka.integration.KafkaServerTestHarness diff --git a/core/src/test/scala/unit/kafka/integration/TopicMetadataTest.scala b/core/src/test/scala/unit/kafka/integration/TopicMetadataTest.scala index ceb39fa..025a349 100644 --- a/core/src/test/scala/unit/kafka/integration/TopicMetadataTest.scala +++ b/core/src/test/scala/unit/kafka/integration/TopicMetadataTest.scala @@ -22,7 +22,7 @@ import kafka.zk.ZooKeeperTestHarness import kafka.admin.AdminUtils import java.nio.ByteBuffer import junit.framework.Assert._ -import kafka.cluster.{ProtocolType, EndPoint, Broker} +import kafka.cluster.{SecurityProtocol, EndPoint, Broker} import kafka.utils.{Utils, TestUtils} import kafka.utils.TestUtils._ import kafka.server.{KafkaServer, KafkaConfig} @@ -34,7 +34,7 @@ class TopicMetadataTest extends JUnit3Suite with ZooKeeperTestHarness { val props = createBrokerConfigs(1) val configs = props.map(p => new KafkaConfig(p)) private var server1: KafkaServer = null - val brokerEndPoints = configs.map(c => new Broker(c.brokerId,Utils.listenerListToEndPoints(c.listeners)).getBrokerEndPoint(ProtocolType.PLAINTEXT)) + val brokerEndPoints = configs.map(c => new Broker(c.brokerId,Utils.listenerListToEndPoints(c.listeners)).getBrokerEndPoint(SecurityProtocol.PLAINTEXT)) override def setUp() { super.setUp() diff --git a/core/src/test/scala/unit/kafka/network/SocketServerTest.scala 
b/core/src/test/scala/unit/kafka/network/SocketServerTest.scala index d5bebc6..66251b0 100644 --- a/core/src/test/scala/unit/kafka/network/SocketServerTest.scala +++ b/core/src/test/scala/unit/kafka/network/SocketServerTest.scala @@ -19,7 +19,7 @@ package kafka.network; import java.net._ import java.io._ -import kafka.cluster.{ProtocolType, EndPoint} +import kafka.cluster.{SecurityProtocol, EndPoint} import org.junit._ import org.scalatest.junit.JUnitSuite import java.util.Random @@ -35,7 +35,7 @@ import kafka.utils.TestUtils class SocketServerTest extends JUnitSuite { val server: SocketServer = new SocketServer(0, - List(EndPoint(null,kafka.utils.TestUtils.choosePort,ProtocolType.PLAINTEXT)), + List(EndPoint(null,kafka.utils.TestUtils.choosePort,SecurityProtocol.PLAINTEXT)), numProcessorThreads = 1, maxQueuedRequests = 50, sendBufferSize = 300000, @@ -71,7 +71,7 @@ class SocketServerTest extends JUnitSuite { channel.sendResponse(new RequestChannel.Response(request.processor, request, send)) } - def connect() = new Socket("localhost", server.endpoints.find(_.protocolType == ProtocolType.PLAINTEXT).get.port) + def connect() = new Socket("localhost", server.endpoints.find(_.protocolType == SecurityProtocol.PLAINTEXT).get.port) @After def cleanup() { diff --git a/core/src/test/scala/unit/kafka/producer/SyncProducerTest.scala b/core/src/test/scala/unit/kafka/producer/SyncProducerTest.scala index 708fa99..5630ddd 100644 --- a/core/src/test/scala/unit/kafka/producer/SyncProducerTest.scala +++ b/core/src/test/scala/unit/kafka/producer/SyncProducerTest.scala @@ -21,7 +21,7 @@ import java.net.SocketTimeoutException import java.util.Properties import junit.framework.Assert import kafka.admin.AdminUtils -import kafka.cluster.ProtocolType +import kafka.cluster.SecurityProtocol import kafka.integration.KafkaServerTestHarness import kafka.message._ import kafka.server.KafkaConfig @@ -40,7 +40,7 @@ class SyncProducerTest extends JUnit3Suite with KafkaServerTestHarness { @Test def testReachableServer() { val server = servers.head - val port = server.socketServer.endpoints.find(_.protocolType==ProtocolType.PLAINTEXT).get.port + val port = server.socketServer.endpoints.find(_.protocolType==SecurityProtocol.PLAINTEXT).get.port val props = TestUtils.getSyncProducerConfig(port) val producer = new SyncProducer(new SyncProducerConfig(props)) @@ -76,7 +76,7 @@ class SyncProducerTest extends JUnit3Suite with KafkaServerTestHarness { @Test def testEmptyProduceRequest() { val server = servers.head - val port = server.socketServer.endpoints.find(_.protocolType==ProtocolType.PLAINTEXT).get.port + val port = server.socketServer.endpoints.find(_.protocolType==SecurityProtocol.PLAINTEXT).get.port val props = TestUtils.getSyncProducerConfig(port) val correlationId = 0 @@ -94,7 +94,7 @@ class SyncProducerTest extends JUnit3Suite with KafkaServerTestHarness { @Test def testMessageSizeTooLarge() { val server = servers.head - val port = server.socketServer.endpoints.find(_.protocolType==ProtocolType.PLAINTEXT).get.port + val port = server.socketServer.endpoints.find(_.protocolType==SecurityProtocol.PLAINTEXT).get.port val props = TestUtils.getSyncProducerConfig(port) val producer = new SyncProducer(new SyncProducerConfig(props)) @@ -122,7 +122,7 @@ class SyncProducerTest extends JUnit3Suite with KafkaServerTestHarness { @Test def testMessageSizeTooLargeWithAckZero() { val server = servers.head - val port = server.socketServer.endpoints.find(_.protocolType==ProtocolType.PLAINTEXT).get.port + val port = 
server.socketServer.endpoints.find(_.protocolType==SecurityProtocol.PLAINTEXT).get.port val props = TestUtils.getSyncProducerConfig(port) props.put("request.required.acks", "0") @@ -150,7 +150,7 @@ class SyncProducerTest extends JUnit3Suite with KafkaServerTestHarness { @Test def testProduceCorrectlyReceivesResponse() { val server = servers.head - val port = server.socketServer.endpoints.find(_.protocolType==ProtocolType.PLAINTEXT).get.port + val port = server.socketServer.endpoints.find(_.protocolType==SecurityProtocol.PLAINTEXT).get.port val props = TestUtils.getSyncProducerConfig(port) val producer = new SyncProducer(new SyncProducerConfig(props)) @@ -197,7 +197,7 @@ class SyncProducerTest extends JUnit3Suite with KafkaServerTestHarness { val timeoutMs = 500 val server = servers.head - val port = server.socketServer.endpoints.find(_.protocolType==ProtocolType.PLAINTEXT).get.port + val port = server.socketServer.endpoints.find(_.protocolType==SecurityProtocol.PLAINTEXT).get.port val props = TestUtils.getSyncProducerConfig(port) val producer = new SyncProducer(new SyncProducerConfig(props)) @@ -224,7 +224,7 @@ class SyncProducerTest extends JUnit3Suite with KafkaServerTestHarness { @Test def testProduceRequestWithNoResponse() { val server = servers.head - val port = server.socketServer.endpoints.find(_.protocolType==ProtocolType.PLAINTEXT).get.port + val port = server.socketServer.endpoints.find(_.protocolType==SecurityProtocol.PLAINTEXT).get.port val props = TestUtils.getSyncProducerConfig(port) val correlationId = 0 val clientId = SyncProducerConfig.DefaultClientId @@ -240,7 +240,7 @@ class SyncProducerTest extends JUnit3Suite with KafkaServerTestHarness { def testNotEnoughReplicas() { val topicName = "minisrtest" val server = servers.head - val port = server.socketServer.endpoints.find(_.protocolType==ProtocolType.PLAINTEXT).get.port + val port = server.socketServer.endpoints.find(_.protocolType==SecurityProtocol.PLAINTEXT).get.port val props = TestUtils.getSyncProducerConfig(port) props.put("request.required.acks", "-1") diff --git a/core/src/test/scala/unit/kafka/server/AdvertiseBrokerTest.scala b/core/src/test/scala/unit/kafka/server/AdvertiseBrokerTest.scala index 49e3933..d503cbb 100644 --- a/core/src/test/scala/unit/kafka/server/AdvertiseBrokerTest.scala +++ b/core/src/test/scala/unit/kafka/server/AdvertiseBrokerTest.scala @@ -17,7 +17,7 @@ package kafka.server -import kafka.cluster.ProtocolType +import kafka.cluster.SecurityProtocol import org.scalatest.junit.JUnit3Suite import kafka.zk.ZooKeeperTestHarness import junit.framework.Assert._ @@ -32,7 +32,7 @@ class AdvertiseBrokerTest extends JUnit3Suite with ZooKeeperTestHarness { override def setUp() { super.setUp() val props = TestUtils.createBrokerConfig(brokerId, TestUtils.choosePort()) - props.put("advertised.listeners",ProtocolType.PLAINTEXT.toString+"://"+advertisedHostName+":"+advertisedPort.toString) + props.put("advertised.listeners",SecurityProtocol.PLAINTEXT.toString+"://"+advertisedHostName+":"+advertisedPort.toString) server = TestUtils.createServer(new KafkaConfig(props)) } @@ -45,7 +45,7 @@ class AdvertiseBrokerTest extends JUnit3Suite with ZooKeeperTestHarness { def testBrokerAdvertiseToZK { val brokerInfo = ZkUtils.getBrokerInfo(zkClient, brokerId) - val endpoint = brokerInfo.get.endPoints.find(_.protocolType == ProtocolType.PLAINTEXT).get + val endpoint = brokerInfo.get.endPoints.find(_.protocolType == SecurityProtocol.PLAINTEXT).get assertEquals(advertisedHostName, endpoint.host) assertEquals(advertisedPort, 
endpoint.port) } diff --git a/core/src/test/scala/unit/kafka/server/KafkaConfigTest.scala b/core/src/test/scala/unit/kafka/server/KafkaConfigTest.scala index 649e35e..8fefedd 100644 --- a/core/src/test/scala/unit/kafka/server/KafkaConfigTest.scala +++ b/core/src/test/scala/unit/kafka/server/KafkaConfigTest.scala @@ -17,7 +17,7 @@ package kafka.server -import kafka.cluster.ProtocolType +import kafka.cluster.SecurityProtocol import org.junit.Test import junit.framework.Assert._ import org.scalatest.junit.JUnit3Suite @@ -96,7 +96,7 @@ class KafkaConfigTest extends JUnit3Suite { val serverConfig = new KafkaConfig(props) val endpoints = Utils.listenerListToEndPoints(serverConfig.advertisedListeners) - val endpoint = endpoints.find(_.protocolType == ProtocolType.PLAINTEXT).get + val endpoint = endpoints.find(_.protocolType == SecurityProtocol.PLAINTEXT).get assertEquals(endpoint.host, hostName) assertEquals(endpoint.port, port) } @@ -112,7 +112,7 @@ class KafkaConfigTest extends JUnit3Suite { val serverConfig = new KafkaConfig(props) val endpoints = Utils.listenerListToEndPoints(serverConfig.advertisedListeners) - val endpoint = endpoints.find(_.protocolType == ProtocolType.PLAINTEXT).get + val endpoint = endpoints.find(_.protocolType == SecurityProtocol.PLAINTEXT).get assertEquals(endpoint.host, advertisedHostName) assertEquals(endpoint.port, advertisedPort) diff --git a/core/src/test/scala/unit/kafka/server/LeaderElectionTest.scala b/core/src/test/scala/unit/kafka/server/LeaderElectionTest.scala index 79dff89..785b271 100644 --- a/core/src/test/scala/unit/kafka/server/LeaderElectionTest.scala +++ b/core/src/test/scala/unit/kafka/server/LeaderElectionTest.scala @@ -23,7 +23,7 @@ import kafka.utils.TestUtils._ import junit.framework.Assert._ import kafka.utils.{ZkUtils, Utils, TestUtils} import kafka.controller.{ControllerContext, LeaderIsrAndControllerEpoch, ControllerChannelManager} -import kafka.cluster.{ProtocolType, Broker} +import kafka.cluster.{SecurityProtocol, Broker} import kafka.common.ErrorMapping import kafka.api._ diff --git a/core/src/test/scala/unit/kafka/utils/TestUtils.scala b/core/src/test/scala/unit/kafka/utils/TestUtils.scala index a7f48a9..989ec39 100644 --- a/core/src/test/scala/unit/kafka/utils/TestUtils.scala +++ b/core/src/test/scala/unit/kafka/utils/TestUtils.scala @@ -35,7 +35,7 @@ import kafka.server._ import kafka.producer._ import kafka.message._ import kafka.api._ -import kafka.cluster.{ProtocolType, Broker} +import kafka.cluster.{SecurityProtocol, Broker} import kafka.consumer.{KafkaStream, ConsumerConfig} import kafka.serializer.{StringEncoder, DefaultEncoder, Encoder} import kafka.common.TopicAndPartition @@ -147,7 +147,7 @@ object TestUtils extends Logging { def getBrokerListStrFromConfigs(configs: Seq[KafkaConfig]): String = { configs.map(c => { - val endpoint = Utils.listenerListToEndPoints(c.listeners).find(_.protocolType == ProtocolType.PLAINTEXT).get + val endpoint = Utils.listenerListToEndPoints(c.listeners).find(_.protocolType == SecurityProtocol.PLAINTEXT).get formatAddress(endpoint.host, endpoint.port) }).mkString(",") } -- 1.9.3 (Apple Git-50) From 14525eedd612287dbc12fc76203737dbc3889cfe Mon Sep 17 00:00:00 2001 From: Gwen Shapira Date: Thu, 25 Dec 2014 10:31:30 -0800 Subject: [PATCH 8/9] fixing unit test after rename of ProtocolType to SecurityProtocol --- core/src/main/scala/kafka/consumer/ConsumerConfig.scala | 1 - 1 file changed, 1 deletion(-) diff --git a/core/src/main/scala/kafka/consumer/ConsumerConfig.scala 
b/core/src/main/scala/kafka/consumer/ConsumerConfig.scala index 054bd62..7ebb240 100644 --- a/core/src/main/scala/kafka/consumer/ConsumerConfig.scala +++ b/core/src/main/scala/kafka/consumer/ConsumerConfig.scala @@ -46,7 +46,6 @@ object ConsumerConfig extends Config { val OffsetsChannelSocketTimeoutMs = 10000 val OffsetsCommitMaxRetries = 5 val OffsetsStorage = "zookeeper" - val SecurityProtocol = "PLAINTEXT" val MirrorTopicsWhitelistProp = "mirror.topics.whitelist" val MirrorTopicsBlacklistProp = "mirror.topics.blacklist" -- 1.9.3 (Apple Git-50) From 448911e8d1ca67ebdeebe6d1143bdbfa97da1da6 Mon Sep 17 00:00:00 2001 From: Gwen Shapira Date: Sat, 27 Dec 2014 12:02:34 -0800 Subject: [PATCH 9/9] Following Joe's advice, added security protocol enum on client side, and modified protocol to use ID instead of string. --- .../org/apache/kafka/clients/NetworkClient.java | 5 ++- .../kafka/clients/producer/KafkaProducer.java | 3 +- .../org/apache/kafka/common/protocol/Protocol.java | 2 +- .../kafka/common/protocol/SecurityProtocol.java | 47 ++++++++++++++++++++++ .../kafka/common/requests/MetadataRequest.java | 13 +++--- .../apache/kafka/clients/NetworkClientTest.java | 3 +- .../kafka/common/requests/RequestResponseTest.java | 3 +- .../scala/kafka/api/ConsumerMetadataRequest.scala | 6 +-- .../scala/kafka/api/ConsumerMetadataResponse.scala | 2 +- .../scala/kafka/api/TopicMetadataRequest.scala | 8 ++-- .../scala/kafka/api/TopicMetadataResponse.scala | 2 +- 11 files changed, 73 insertions(+), 21 deletions(-) create mode 100644 clients/src/main/java/org/apache/kafka/common/protocol/SecurityProtocol.java diff --git a/clients/src/main/java/org/apache/kafka/clients/NetworkClient.java b/clients/src/main/java/org/apache/kafka/clients/NetworkClient.java index 13387ba..b6530fb 100644 --- a/clients/src/main/java/org/apache/kafka/clients/NetworkClient.java +++ b/clients/src/main/java/org/apache/kafka/clients/NetworkClient.java @@ -27,6 +27,7 @@ import org.apache.kafka.common.network.NetworkSend; import org.apache.kafka.common.network.Selectable; import org.apache.kafka.common.protocol.ApiKeys; import org.apache.kafka.common.protocol.ProtoUtils; +import org.apache.kafka.common.protocol.SecurityProtocol; import org.apache.kafka.common.protocol.types.Struct; import org.apache.kafka.common.requests.MetadataRequest; import org.apache.kafka.common.requests.MetadataResponse; @@ -81,7 +82,7 @@ public class NetworkClient implements KafkaClient { private long lastNoNodeAvailableMs; /* protocol used for communication to brokers */ - private String securityProtocol; + private SecurityProtocol securityProtocol; public NetworkClient(Selectable selector, Metadata metadata, @@ -90,7 +91,7 @@ public class NetworkClient implements KafkaClient { long reconnectBackoffMs, int socketSendBuffer, int socketReceiveBuffer, - String securityProtocol) { + SecurityProtocol securityProtocol) { this.selector = selector; this.metadata = metadata; this.clientId = clientId; diff --git a/clients/src/main/java/org/apache/kafka/clients/producer/KafkaProducer.java b/clients/src/main/java/org/apache/kafka/clients/producer/KafkaProducer.java index 96487ae..395e542 100644 --- a/clients/src/main/java/org/apache/kafka/clients/producer/KafkaProducer.java +++ b/clients/src/main/java/org/apache/kafka/clients/producer/KafkaProducer.java @@ -41,6 +41,7 @@ import org.apache.kafka.common.metrics.Metrics; import org.apache.kafka.common.metrics.MetricsReporter; import org.apache.kafka.common.metrics.Sensor; import org.apache.kafka.common.network.Selector; +import 
org.apache.kafka.common.protocol.SecurityProtocol; import org.apache.kafka.common.record.CompressionType; import org.apache.kafka.common.record.Record; import org.apache.kafka.common.record.Records; @@ -163,7 +164,7 @@ public class KafkaProducer implements Producer { config.getLong(ProducerConfig.RECONNECT_BACKOFF_MS_CONFIG), config.getInt(ProducerConfig.SEND_BUFFER_CONFIG), config.getInt(ProducerConfig.RECEIVE_BUFFER_CONFIG), - config.getString(ProducerConfig.SECURITY_PROTOCOL)); + SecurityProtocol.valueOf(config.getString(ProducerConfig.SECURITY_PROTOCOL))); this.sender = new Sender(client, this.metadata, this.accumulator, diff --git a/clients/src/main/java/org/apache/kafka/common/protocol/Protocol.java b/clients/src/main/java/org/apache/kafka/common/protocol/Protocol.java index 29ce7af..f1a6db3 100644 --- a/clients/src/main/java/org/apache/kafka/common/protocol/Protocol.java +++ b/clients/src/main/java/org/apache/kafka/common/protocol/Protocol.java @@ -50,7 +50,7 @@ public class Protocol { public static Schema METADATA_REQUEST_V1 = new Schema(new Field("topics", new ArrayOf(STRING), "An array of topics to fetch metadata for. If no topics are specified fetch metadata for all topics."), - new Field("security_protocol",STRING,"The response should contain broker endpoints that correspond to this protocol")); + new Field("security_protocol",INT16,"The response should contain broker endpoints that correspond to this protocol")); public static Schema BROKER = new Schema(new Field("node_id", INT32, "The broker id."), new Field("host", STRING, "The hostname of the broker."), diff --git a/clients/src/main/java/org/apache/kafka/common/protocol/SecurityProtocol.java b/clients/src/main/java/org/apache/kafka/common/protocol/SecurityProtocol.java new file mode 100644 index 0000000..1d789ad --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/common/protocol/SecurityProtocol.java @@ -0,0 +1,47 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.common.protocol; + +import java.util.HashMap; +import java.util.Map; + +public enum SecurityProtocol { + PLAINTEXT(0,"PLAINTEXT"); + + private static Map<Short, SecurityProtocol> codeToSecurityProtocol = new HashMap<Short, SecurityProtocol>(); + + static { + for (SecurityProtocol proto: SecurityProtocol.values()) { + codeToSecurityProtocol.put(proto.id,proto); + } + } + + /** the permanent and immutable id of a security protocol -- this can't change, and must match kafka.cluster.SecurityProtocol */ + public final short id; + + /** a name of the security protocol. 
This may be used by client configuration */ + public final String name; + + private SecurityProtocol(int id, String name) { + this.id = (short) id; + this.name = name; + } + + public static String getName(int id) { + return codeToSecurityProtocol.get(id).name; + } +} diff --git a/clients/src/main/java/org/apache/kafka/common/requests/MetadataRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/MetadataRequest.java index 4e53ad9..6549a08 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/MetadataRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/MetadataRequest.java @@ -18,18 +18,19 @@ import java.util.List; import org.apache.kafka.common.protocol.ApiKeys; import org.apache.kafka.common.protocol.ProtoUtils; +import org.apache.kafka.common.protocol.SecurityProtocol; import org.apache.kafka.common.protocol.types.Schema; import org.apache.kafka.common.protocol.types.Struct; public class MetadataRequest extends AbstractRequestResponse { public static Schema curSchema = ProtoUtils.currentRequestSchema(ApiKeys.METADATA.id); - public static String DEFAULT_PROTOCOL = "PLAINTEXT"; + public static Short DEFAULT_PROTOCOL = SecurityProtocol.PLAINTEXT.id; private static String TOPICS_KEY_NAME = "topics"; private static String PROTOCOL_KEY_NAME = "security_protocol"; private final List topics; - private String protocol; + private short protocol; /* Constructor for V0 */ public MetadataRequest(List topics) { @@ -39,12 +40,12 @@ public class MetadataRequest extends AbstractRequestResponse { } /* Constructor for V1 */ - public MetadataRequest(List topics, String protocol) { + public MetadataRequest(List topics, SecurityProtocol protocol) { super(new Struct(ProtoUtils.requestSchema(ApiKeys.METADATA.id,1))); struct.set(TOPICS_KEY_NAME, topics.toArray()); - struct.set(PROTOCOL_KEY_NAME, protocol); + struct.set(PROTOCOL_KEY_NAME, protocol.id); this.topics = topics; - this.protocol = protocol; + this.protocol = protocol.id; } public MetadataRequest(Struct struct) { @@ -55,7 +56,7 @@ public class MetadataRequest extends AbstractRequestResponse { topics.add((String) topicObj); } if (struct.hasField(PROTOCOL_KEY_NAME)) { - protocol = struct.getString(PROTOCOL_KEY_NAME); + protocol = struct.getShort(PROTOCOL_KEY_NAME); } } diff --git a/clients/src/test/java/org/apache/kafka/clients/NetworkClientTest.java b/clients/src/test/java/org/apache/kafka/clients/NetworkClientTest.java index 749fb47..0b96d9a 100644 --- a/clients/src/test/java/org/apache/kafka/clients/NetworkClientTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/NetworkClientTest.java @@ -17,6 +17,7 @@ import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.network.NetworkReceive; import org.apache.kafka.common.protocol.ApiKeys; import org.apache.kafka.common.protocol.ProtoUtils; +import org.apache.kafka.common.protocol.SecurityProtocol; import org.apache.kafka.common.protocol.types.Struct; import org.apache.kafka.common.requests.MetadataRequest; import org.apache.kafka.common.requests.ProduceRequest; @@ -37,7 +38,7 @@ public class NetworkClientTest { private int nodeId = 1; private Cluster cluster = TestUtils.singletonCluster("test", nodeId); private Node node = cluster.nodes().get(0); - private NetworkClient client = new NetworkClient(selector, metadata, "mock", Integer.MAX_VALUE, 0, 64 * 1024, 64 * 1024,"PLAINTEXT"); + private NetworkClient client = new NetworkClient(selector, metadata, "mock", Integer.MAX_VALUE, 0, 64 * 1024, 64 * 1024, SecurityProtocol.PLAINTEXT); 
diff --git a/clients/src/test/java/org/apache/kafka/common/requests/RequestResponseTest.java b/clients/src/test/java/org/apache/kafka/common/requests/RequestResponseTest.java
index 58b5c3e..59edcf0 100644
--- a/clients/src/test/java/org/apache/kafka/common/requests/RequestResponseTest.java
+++ b/clients/src/test/java/org/apache/kafka/common/requests/RequestResponseTest.java
@@ -17,6 +17,7 @@ import org.apache.kafka.common.Cluster;
 import org.apache.kafka.common.Node;
 import org.apache.kafka.common.PartitionInfo;
 import org.apache.kafka.common.TopicPartition;
+import org.apache.kafka.common.protocol.SecurityProtocol;
 import org.junit.Test;
 
 import java.lang.reflect.Method;
@@ -124,7 +125,7 @@ public class RequestResponseTest {
     }
 
     private AbstractRequestResponse createMetadataRequest() {
-        return new MetadataRequest(Arrays.asList("topic1"), "PLAINTEXT");
+        return new MetadataRequest(Arrays.asList("topic1"), SecurityProtocol.PLAINTEXT);
     }
 
     private AbstractRequestResponse createMetadataResponse() {
diff --git a/core/src/main/scala/kafka/api/ConsumerMetadataRequest.scala b/core/src/main/scala/kafka/api/ConsumerMetadataRequest.scala
index 78ac228..8168c1b 100644
--- a/core/src/main/scala/kafka/api/ConsumerMetadataRequest.scala
+++ b/core/src/main/scala/kafka/api/ConsumerMetadataRequest.scala
@@ -35,7 +35,7 @@ object ConsumerMetadataRequest {
     val clientId = ApiUtils.readShortString(buffer)
 
     // request
     val group = ApiUtils.readShortString(buffer)
-    val securityProtocol = SecurityProtocol.withName(ApiUtils.readShortString(buffer))
+    val securityProtocol = SecurityProtocol(ApiUtils.readShortInRange(buffer, "security protocol id", (0, SecurityProtocol.maxId.toShort)))
     ConsumerMetadataRequest(group, versionId, correlationId, clientId, securityProtocol)
   }
 
@@ -53,7 +53,7 @@ case class ConsumerMetadataRequest(group: String,
     4 + /* correlationId */
     ApiUtils.shortStringLength(clientId) +
     ApiUtils.shortStringLength(group) +
-    ApiUtils.shortStringLength(securityProtocol.toString)
+    2
 
   def writeTo(buffer: ByteBuffer) {
     // envelope
@@ -63,7 +63,7 @@ case class ConsumerMetadataRequest(group: String,
 
     // consumer metadata request
     ApiUtils.writeShortString(buffer, group)
-    ApiUtils.writeShortString(buffer, securityProtocol.toString)
+    buffer.putShort(securityProtocol.id.toShort)
   }
 
   override def handleError(e: Throwable, requestChannel: RequestChannel, request: RequestChannel.Request): Unit = {
diff --git a/core/src/main/scala/kafka/api/ConsumerMetadataResponse.scala b/core/src/main/scala/kafka/api/ConsumerMetadataResponse.scala
index f9a9101..1183aae 100644
--- a/core/src/main/scala/kafka/api/ConsumerMetadataResponse.scala
+++ b/core/src/main/scala/kafka/api/ConsumerMetadataResponse.scala
@@ -18,7 +18,7 @@ package kafka.api
 
 import java.nio.ByteBuffer
-import kafka.cluster.{SecurityProtocol, BrokerEndpoint, Broker}
+import kafka.cluster.BrokerEndpoint
 import kafka.common.ErrorMapping
 
 object ConsumerMetadataResponse {
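[Editor's sketch, not part of the patch] With the old wire format, an unknown protocol name surfaced as a NoSuchElementException out of SecurityProtocol.withName; the readShortInRange call above instead rejects out-of-range ids with a descriptive error before the enum lookup. A hypothetical Java analogue of kafka.api.ApiUtils.readShortInRange (names and message text are illustrative, not the Scala implementation):

    import java.nio.ByteBuffer;

    public class ReadShortInRange {
        // Read a short and fail fast when it cannot be a known protocol id.
        static short readShortInRange(ByteBuffer buffer, String name, short min, short max) {
            short value = buffer.getShort();
            if (value < min || value > max)
                throw new IllegalArgumentException(name + " has value " + value
                        + " which is not in the valid range [" + min + "," + max + "]");
            return value;
        }

        public static void main(String[] args) {
            ByteBuffer buffer = ByteBuffer.allocate(2);
            buffer.putShort((short) 99);
            buffer.rewind();
            // 3 stands in for SecurityProtocol.maxId; this call throws IllegalArgumentException.
            readShortInRange(buffer, "security protocol id", (short) 0, (short) 3);
        }
    }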
diff --git a/core/src/main/scala/kafka/api/TopicMetadataRequest.scala b/core/src/main/scala/kafka/api/TopicMetadataRequest.scala
index 423eda6..7572233 100644
--- a/core/src/main/scala/kafka/api/TopicMetadataRequest.scala
+++ b/core/src/main/scala/kafka/api/TopicMetadataRequest.scala
@@ -55,9 +55,9 @@ object TopicMetadataRequest extends Logging {
 
     var securityProtocol = org.apache.kafka.common.requests.MetadataRequest.DEFAULT_PROTOCOL
     if (versionId == 1) {
-      securityProtocol = readShortString(buffer)
+      securityProtocol = readShortInRange(buffer, "security protocol id", (0, SecurityProtocol.maxId.toShort))
     }
-    new TopicMetadataRequest(versionId, correlationId, clientId, SecurityProtocol.withName(securityProtocol), topics.toList)
+    new TopicMetadataRequest(versionId, correlationId, clientId, SecurityProtocol(securityProtocol.toInt), topics.toList)
   }
 }
@@ -77,7 +77,7 @@ case class TopicMetadataRequest(val versionId: Short,
     writeShortString(buffer, clientId)
     buffer.putInt(topics.size)
     topics.foreach(topic => writeShortString(buffer, topic))
-    writeShortString(buffer, securityProtocol.toString)
+    buffer.putShort(securityProtocol.id.toShort)
   }
 
   def sizeInBytes(): Int = {
@@ -86,7 +86,7 @@ case class TopicMetadataRequest(val versionId: Short,
     shortStringLength(clientId) + /* client id */
     4 + /* number of topics */
     topics.foldLeft(0)(_ + shortStringLength(_)) + /* topics */
-    shortStringLength(securityProtocol.toString) /* security protocol */
+    2 /* security protocol */
   }
 
   override def toString(): String = {
diff --git a/core/src/main/scala/kafka/api/TopicMetadataResponse.scala b/core/src/main/scala/kafka/api/TopicMetadataResponse.scala
index c55f60c..4de566b 100644
--- a/core/src/main/scala/kafka/api/TopicMetadataResponse.scala
+++ b/core/src/main/scala/kafka/api/TopicMetadataResponse.scala
@@ -17,7 +17,7 @@ package kafka.api
 
-import kafka.cluster.{SecurityProtocol, BrokerEndpoint, Broker}
+import kafka.cluster.BrokerEndpoint
 import java.nio.ByteBuffer
 
 object TopicMetadataResponse {
-- 
1.9.3 (Apple Git-50)
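[Editor's sketch, not part of the patch] The sizeInBytes changes above all swap shortStringLength(securityProtocol.toString) for a constant 2, since the id is a fixed-width short. Worked out for PLAINTEXT, assuming writeShortString emits a 2-byte length prefix followed by the UTF-8 bytes of the name: the old encoding cost 2 + 9 = 11 bytes, the new one a flat 2 bytes for any protocol:

    import java.nio.charset.StandardCharsets;

    public class ProtocolFieldSize {
        public static void main(String[] args) {
            // Old encoding: 2-byte length prefix plus the UTF-8 bytes of the name.
            int oldSize = 2 + "PLAINTEXT".getBytes(StandardCharsets.UTF_8).length;

            // New encoding: just the 2-byte protocol id, independent of the name.
            int newSize = 2;

            System.out.println("old: " + oldSize + " bytes, new: " + newSize + " bytes"); // old: 11, new: 2
        }
    }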